Bluetooth: Add support for max_tx_power in Get Conn Info
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
/* Management interface version/revision advertised to user space via
 * the Read Management Version Information command.
 */
#define MGMT_VERSION 1
#define MGMT_REVISION 6
38
/* Opcodes reported in the Read Management Commands reply; keep in sync
 * with the handlers this file implements (includes Get Conn Info).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
};
88
/* Events reported in the Read Management Commands reply. */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
};
114
/* 2 second timeout; presumably bounds the service cache lifetime before
 * service_cache_off() flushes EIR/class updates -- the scheduling site
 * is outside this chunk, confirm there.
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* Powered from mgmt's point of view: interface is up and not merely
 * powered temporarily with auto-off pending.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
119
/* A mgmt command waiting for its HCI transaction to finish; linked on
 * hdev->mgmt_pending (see mgmt_pending_add()/mgmt_pending_free()).
 */
struct pending_cmd {
	struct list_head list;
	u16 opcode;		/* mgmt opcode the eventual reply carries */
	int index;		/* controller index the command targeted */
	void *param;		/* private kmalloc'd copy of the parameters */
	struct sock *sk;	/* originating socket; a reference is held */
	void *user_data;
};
128
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status byte; mgmt_status() falls back to MGMT_STATUS_FAILED for
 * codes past the end.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
193
194 static u8 mgmt_status(u8 hci_status)
195 {
196 if (hci_status < ARRAY_SIZE(mgmt_status_table))
197 return mgmt_status_table[hci_status];
198
199 return MGMT_STATUS_FAILED;
200 }
201
/* Queue a Command Status event (opcode @cmd, status @status) on the
 * mgmt socket @sk for controller @index.  Returns 0 on success or the
 * sock_queue_rcv_skb() error.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued, drop our reference */

	return err;
}
231
/* Queue a Command Complete event (opcode @cmd, status @status) with an
 * optional response payload @rp of @rp_len bytes on the mgmt socket
 * @sk.  Returns 0 on success or the sock_queue_rcv_skb() error.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued, drop our reference */

	return err;
}
265
266 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
267 u16 data_len)
268 {
269 struct mgmt_rp_read_version rp;
270
271 BT_DBG("sock %p", sk);
272
273 rp.version = MGMT_VERSION;
274 rp.revision = cpu_to_le16(MGMT_REVISION);
275
276 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
277 sizeof(rp));
278 }
279
280 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
281 u16 data_len)
282 {
283 struct mgmt_rp_read_commands *rp;
284 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
285 const u16 num_events = ARRAY_SIZE(mgmt_events);
286 __le16 *opcode;
287 size_t rp_size;
288 int i, err;
289
290 BT_DBG("sock %p", sk);
291
292 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
293
294 rp = kmalloc(rp_size, GFP_KERNEL);
295 if (!rp)
296 return -ENOMEM;
297
298 rp->num_commands = cpu_to_le16(num_commands);
299 rp->num_events = cpu_to_le16(num_events);
300
301 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
302 put_unaligned_le16(mgmt_commands[i], opcode);
303
304 for (i = 0; i < num_events; i++, opcode++)
305 put_unaligned_le16(mgmt_events[i], opcode);
306
307 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
308 rp_size);
309 kfree(rp);
310
311 return err;
312 }
313
/* Handle Read Controller Index List: report the ids of all BR/EDR
 * controllers that are neither still in setup nor claimed by a user
 * channel.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the reply size.  HCI_SETUP and
	 * HCI_USER_CHANNEL devices are only filtered out in the second
	 * pass, so this may over-count; rp_len is recomputed below.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
366
367 static u32 get_supported_settings(struct hci_dev *hdev)
368 {
369 u32 settings = 0;
370
371 settings |= MGMT_SETTING_POWERED;
372 settings |= MGMT_SETTING_PAIRABLE;
373 settings |= MGMT_SETTING_DEBUG_KEYS;
374
375 if (lmp_bredr_capable(hdev)) {
376 settings |= MGMT_SETTING_CONNECTABLE;
377 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
378 settings |= MGMT_SETTING_FAST_CONNECTABLE;
379 settings |= MGMT_SETTING_DISCOVERABLE;
380 settings |= MGMT_SETTING_BREDR;
381 settings |= MGMT_SETTING_LINK_SECURITY;
382
383 if (lmp_ssp_capable(hdev)) {
384 settings |= MGMT_SETTING_SSP;
385 settings |= MGMT_SETTING_HS;
386 }
387
388 if (lmp_sc_capable(hdev) ||
389 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
390 settings |= MGMT_SETTING_SECURE_CONN;
391 }
392
393 if (lmp_le_capable(hdev)) {
394 settings |= MGMT_SETTING_LE;
395 settings |= MGMT_SETTING_ADVERTISING;
396 settings |= MGMT_SETTING_PRIVACY;
397 }
398
399 return settings;
400 }
401
402 static u32 get_current_settings(struct hci_dev *hdev)
403 {
404 u32 settings = 0;
405
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
408
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
411
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_FAST_CONNECTABLE;
414
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_DISCOVERABLE;
417
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_PAIRABLE;
420
421 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_BREDR;
423
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LE;
426
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
428 settings |= MGMT_SETTING_LINK_SECURITY;
429
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SSP;
432
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_HS;
435
436 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
437 settings |= MGMT_SETTING_ADVERTISING;
438
439 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
440 settings |= MGMT_SETTING_SECURE_CONN;
441
442 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
443 settings |= MGMT_SETTING_DEBUG_KEYS;
444
445 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
446 settings |= MGMT_SETTING_PRIVACY;
447
448 return settings;
449 }
450
/* Device ID Profile service class; excluded from the EIR UUID list */
#define PNP_INFO_SVCLASS_ID 0x1200

/* Append an EIR list of 16-bit service UUIDs for @hdev to @data (at
 * most @len bytes left).  Returns the new write position.  The list
 * type is downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME when it
 * has to be truncated for lack of space.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias sits in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length grows per UUID */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
494
/* Append an EIR list of 32-bit service UUIDs for @hdev to @data (at
 * most @len bytes left).  Returns the new write position; downgrades
 * to EIR_UUID32_SOME on truncation.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length grows per UUID */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias occupies bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
527
/* Append an EIR list of 128-bit service UUIDs for @hdev to @data (at
 * most @len bytes left).  Returns the new write position; downgrades
 * to EIR_UUID128_SOME on truncation.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length grows per UUID */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
560
561 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
562 {
563 struct pending_cmd *cmd;
564
565 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
566 if (cmd->opcode == opcode)
567 return cmd;
568 }
569
570 return NULL;
571 }
572
573 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
574 {
575 u8 ad_len = 0;
576 size_t name_len;
577
578 name_len = strlen(hdev->dev_name);
579 if (name_len > 0) {
580 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
581
582 if (name_len > max_len) {
583 name_len = max_len;
584 ptr[1] = EIR_NAME_SHORT;
585 } else
586 ptr[1] = EIR_NAME_COMPLETE;
587
588 ptr[0] = name_len + 1;
589
590 memcpy(ptr + 2, hdev->dev_name, name_len);
591
592 ad_len += (name_len + 2);
593 ptr += (name_len + 2);
594 }
595
596 return ad_len;
597 }
598
/* Queue an LE Set Scan Response Data command when the payload derived
 * from the current device state differs from what was last programmed.
 * No-op while LE is disabled.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
623
624 static u8 get_adv_discov_flags(struct hci_dev *hdev)
625 {
626 struct pending_cmd *cmd;
627
628 /* If there's a pending mgmt command the flags will not yet have
629 * their final values, so check for this first.
630 */
631 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
632 if (cmd) {
633 struct mgmt_mode *cp = cmd->param;
634 if (cp->val == 0x01)
635 return LE_AD_GENERAL;
636 else if (cp->val == 0x02)
637 return LE_AD_LIMITED;
638 } else {
639 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_LIMITED;
641 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
642 return LE_AD_GENERAL;
643 }
644
645 return 0;
646 }
647
/* Build the LE advertising payload for @hdev into @ptr: the flags
 * element (discoverable mode and BR/EDR support) and, when known, the
 * advertising TX power.  Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The flags element is omitted entirely when all flags are zero */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
679
/* Queue an LE Set Advertising Data command when the payload derived
 * from the current device state differs from what was last programmed.
 * No-op while LE is disabled.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
704
/* Build the extended inquiry response for @hdev into @data
 * (HCI_MAX_EIR_LENGTH bytes): local name, inquiry TX power, Device ID
 * record and the 16/32/128-bit service UUID lists, in that order.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type; names longer than 48 bytes are cut */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* DI profile record: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Whatever room remains goes to the service UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
752
753 static void update_eir(struct hci_request *req)
754 {
755 struct hci_dev *hdev = req->hdev;
756 struct hci_cp_write_eir cp;
757
758 if (!hdev_is_powered(hdev))
759 return;
760
761 if (!lmp_ext_inq_capable(hdev))
762 return;
763
764 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
765 return;
766
767 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
768 return;
769
770 memset(&cp, 0, sizeof(cp));
771
772 create_eir(hdev, cp.data);
773
774 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
775 return;
776
777 memcpy(hdev->eir, cp.data, sizeof(cp.data));
778
779 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
780 }
781
782 static u8 get_service_classes(struct hci_dev *hdev)
783 {
784 struct bt_uuid *uuid;
785 u8 val = 0;
786
787 list_for_each_entry(uuid, &hdev->uuids, list)
788 val |= uuid->svc_hint;
789
790 return val;
791 }
792
/* Queue a Write Class of Device command built from the minor/major
 * class and the service-class bits of registered UUIDs, when it
 * differs from the cached value.  Skipped while powered off, BR/EDR
 * disabled or the service cache is active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable is signalled in the class of device */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
821
822 static bool get_connectable(struct hci_dev *hdev)
823 {
824 struct pending_cmd *cmd;
825
826 /* If there's a pending mgmt command the flag will not yet have
827 * it's final value, so check for this first.
828 */
829 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
830 if (cmd) {
831 struct mgmt_mode *cp = cmd->param;
832 return cp->val;
833 }
834
835 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
836 }
837
/* Queue LE Set Advertising Parameters + Enable commands.  Advertising
 * is connectable (ADV_IND) when the adapter is connectable or a Set
 * Connectable command is pending, non-connectable (ADV_NONCONN_IND)
 * otherwise.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 * 0.625ms = 1.28s advertising interval */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
872
/* Queue an LE Set Advertise Enable command turning advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
879
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller (update_eir()/
 * update_class() are no-ops while HCI_SERVICE_CACHE is set, so the
 * flag is cleared first).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
900
/* Delayed work: mark the resolvable private address expired and, if we
 * are advertising without active LE connections, restart advertising
 * so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
926
/* One-time mgmt takeover of @hdev: runs on the first mgmt command for
 * the device (guarded by HCI_MGMT) and sets up the delayed works used
 * by this file.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
942
/* Handle Read Controller Information: snapshot address, HCI version,
 * manufacturer, settings, class of device and names under the device
 * lock and send them back in one reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
972
/* Release a pending command: drops the socket reference taken in
 * mgmt_pending_add().  The caller must have unlinked it already.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
979
/* Allocate and queue a pending command on hdev->mgmt_pending with a
 * private copy of the @len parameter bytes (left uninitialized when
 * @data is NULL).  Takes a reference on @sk, released by
 * mgmt_pending_free().  Returns NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1009
1010 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1011 void (*cb)(struct pending_cmd *cmd,
1012 void *data),
1013 void *data)
1014 {
1015 struct pending_cmd *cmd, *tmp;
1016
1017 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1018 if (opcode > 0 && cmd->opcode != opcode)
1019 continue;
1020
1021 cb(cmd, data);
1022 }
1023 }
1024
/* Unlink a pending command from hdev->mgmt_pending and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1030
1031 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1032 {
1033 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1034
1035 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1036 sizeof(settings));
1037 }
1038
/* Request-complete callback for clean_up_hci_state(): once no
 * connections remain, run the power off immediately instead of
 * waiting for the delayed power_off work.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1048
/* Queue the HCI commands that quiesce the controller before power off:
 * stop page/inquiry scan, advertising and LE scanning, then
 * disconnect, cancel or reject every connection according to its
 * state.  Returns the hci_req_run() result (-ENODATA when nothing
 * needed doing).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links are disconnected */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1103
/* Handle Set Powered: validate the mode byte, reject concurrent
 * requests, short-circuit auto-off and no-op transitions, otherwise
 * queue the power on work or start the staged power off sequence.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight per controller */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device is already up for setup with auto-off pending: keeping
	 * it on only requires cancelling that work.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already in effect: just confirm it */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1169
/* Broadcast a mgmt event with optional payload to all mgmt control
 * sockets except @skip_sk.  A NULL @hdev addresses MGMT_INDEX_NONE.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1199
1200 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1201 {
1202 __le32 ev;
1203
1204 ev = cpu_to_le32(get_current_settings(hdev));
1205
1206 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1207 }
1208
/* Context cookie passed to mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder socket seen (reference held), or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1214
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. The first socket encountered is stashed
 * (with an extra reference) in the cmd_lookup so the caller can skip it
 * when broadcasting the settings change afterwards.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; mgmt_pending_free() does not do this */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1230
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by @data and remove it from the list.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1238
1239 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1240 {
1241 if (!lmp_bredr_capable(hdev))
1242 return MGMT_STATUS_NOT_SUPPORTED;
1243 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1244 return MGMT_STATUS_REJECTED;
1245 else
1246 return MGMT_STATUS_SUCCESS;
1247 }
1248
1249 static u8 mgmt_le_support(struct hci_dev *hdev)
1250 {
1251 if (!lmp_le_capable(hdev))
1252 return MGMT_STATUS_NOT_SUPPORTED;
1253 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1254 return MGMT_STATUS_REJECTED;
1255 else
1256 return MGMT_STATUS_SUCCESS;
1257 }
1258
/* HCI request completion callback for set_discoverable(): update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, answer the
 * pending command and broadcast the settings change.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Limited discoverable was set optimistically; roll back */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout (stored in seconds) now that the HCI
		 * side of the mode change has succeeded.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1315
/* Handle the MGMT_OP_SET_DISCOVERABLE command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is in seconds. For BR/EDR this programs
 * the IAC LAPs and scan enable; for LE-only controllers only the
 * advertising data flags are updated. Completion is handled in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share state; only one
	 * such operation may be pending at a time.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only flip the stored flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		/* Only num_iac LAPs (3 bytes each) plus the count byte
		 * are actually sent to the controller.
		 */
		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1480
/* Queue HCI commands to switch between fast (interlaced, short
 * interval) and standard page scan parameters. Only issues commands
 * for values that actually differ from the controller's current ones.
 * Page scan parameters require BR/EDR and Bluetooth 1.2 or later.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	/* Page scan window (baseband slots of 0.625 ms each) */
	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1515
/* HCI request completion callback for set_connectable(): update the
 * HCI_CONNECTABLE flag, answer the pending command and broadcast the
 * settings change if anything actually changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1553
1554 static int set_connectable_update_settings(struct hci_dev *hdev,
1555 struct sock *sk, u8 val)
1556 {
1557 bool changed = false;
1558 int err;
1559
1560 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1561 changed = true;
1562
1563 if (val) {
1564 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1565 } else {
1566 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1567 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1568 }
1569
1570 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1571 if (err < 0)
1572 return err;
1573
1574 if (changed)
1575 return new_settings(hdev, sk);
1576
1577 return 0;
1578 }
1579
/* Handle the MGMT_OP_SET_CONNECTABLE command.
 *
 * cp->val: 0x00 = off, 0x01 = on. On BR/EDR this programs page scan;
 * on LE-only controllers it is reflected through the advertising data
 * and advertising parameters. Completion is handled in
 * set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags change */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes share state; only one
	 * such operation may be pending at a time.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable makes any running
			 * discoverable timeout pointless; cancel it.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so its parameters pick up the new
	 * connectable state, but only when no LE links are up.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: no HCI commands were needed, so fall back to
		 * just updating the stored settings.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1674
1675 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1676 u16 len)
1677 {
1678 struct mgmt_mode *cp = data;
1679 bool changed;
1680 int err;
1681
1682 BT_DBG("request for %s", hdev->name);
1683
1684 if (cp->val != 0x00 && cp->val != 0x01)
1685 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1686 MGMT_STATUS_INVALID_PARAMS);
1687
1688 hci_dev_lock(hdev);
1689
1690 if (cp->val)
1691 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1692 else
1693 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1694
1695 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1696 if (err < 0)
1697 goto unlock;
1698
1699 if (changed)
1700 err = new_settings(hdev, sk);
1701
1702 unlock:
1703 hci_dev_unlock(hdev);
1704 return err;
1705 }
1706
/* Handle the MGMT_OP_SET_LINK_SECURITY command: toggle BR/EDR link
 * level security (authentication) via HCI Write Auth Enable. When
 * powered off only the stored flag is changed.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip the stored flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: nothing to do */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1776
/* Handle the MGMT_OP_SET_SSP command: toggle Secure Simple Pairing via
 * HCI Write Simple Pairing Mode. Disabling SSP also clears the High
 * Speed flag since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SSP is a BR/EDR feature and needs controller support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only adjust the stored flags */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS requires SSP, so it must go too; report a
			 * change if either flag was actually cleared.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1854
/* Handle the MGMT_OP_SET_HS command: toggle the High Speed (AMP) host
 * flag. This is a pure host-side setting; no HCI commands are sent.
 * HS requires SSP to be enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled first */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS while powered is rejected here */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1905
/* HCI request completion callback for set_le(): answer all pending
 * Set LE commands (success or failure) and, on success, refresh the
 * advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with this status */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken in settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1943
/* Handle the MGMT_OP_SET_LE command: toggle host LE support via HCI
 * Write LE Host Supported. Disabling LE also tears down advertising.
 * Completion is handled in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If powered off, or the controller already has the requested
	 * host LE state, only the stored flags need to change.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on with LE disabled */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set LE and Set Advertising share state; only one of them may
	 * be pending at a time.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning host LE support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2032
2033 /* This is a helper function to test for pending mgmt commands that can
2034 * cause CoD or EIR HCI commands. We can only allow one such pending
2035 * mgmt command at a time since otherwise we cannot easily track what
2036 * the current values are, will be, and based on that calculate if a new
2037 * HCI command needs to be sent and if yes with what value.
2038 */
2039 static bool pending_eir_or_class(struct hci_dev *hdev)
2040 {
2041 struct pending_cmd *cmd;
2042
2043 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2044 switch (cmd->opcode) {
2045 case MGMT_OP_ADD_UUID:
2046 case MGMT_OP_REMOVE_UUID:
2047 case MGMT_OP_SET_DEV_CLASS:
2048 case MGMT_OP_SET_POWERED:
2049 return true;
2050 }
2051 }
2052
2053 return false;
2054 }
2055
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * in little-endian byte order; the first 12 bytes are compared against
 * candidate UUIDs to detect shortened 16/32-bit forms.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2060
2061 static u8 get_uuid_size(const u8 *uuid)
2062 {
2063 u32 val;
2064
2065 if (memcmp(uuid, bluetooth_base_uuid, 12))
2066 return 128;
2067
2068 val = get_unaligned_le32(&uuid[12]);
2069 if (val > 0xffff)
2070 return 32;
2071
2072 return 16;
2073 }
2074
/* Complete a pending class-of-device related command (Add/Remove UUID,
 * Set Device Class) with the current device class as response data.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* The class of device is always 3 bytes */
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2093
/* HCI request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2100
/* Handle the MGMT_OP_ADD_UUID command: record a new service UUID and
 * refresh the class of device and EIR data accordingly. The response
 * is sent from add_uuid_complete() unless no HCI commands were needed.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* ENODATA: nothing needed to change on the controller,
		 * so complete the command immediately.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2158
2159 static bool enable_service_cache(struct hci_dev *hdev)
2160 {
2161 if (!hdev_is_powered(hdev))
2162 return false;
2163
2164 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2165 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2166 CACHE_TIMEOUT);
2167 return true;
2168 }
2169
2170 return false;
2171 }
2172
/* HCI request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2179
/* Handle the MGMT_OP_REMOVE_UUID command: delete a specific service
 * UUID (or all of them when the all-zero UUID is given) and refresh
 * the class of device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID requests removal of every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the controller
		 * update is deferred; complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	/* Removing an unknown UUID is an error */
	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no controller update needed, complete now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2257
/* HCI request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2264
/* Handle the MGMT_OP_SET_DEV_CLASS command: update the major/minor
 * class of device and push the change (plus EIR if the service cache
 * was armed) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and three high bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values are enough, complete now */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache first so the EIR update below works
	 * on up-to-date UUID data. The lock is dropped around the sync
	 * cancel since the work item itself takes it.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no controller update needed, complete now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2335
2336 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2337 u16 len)
2338 {
2339 struct mgmt_cp_load_link_keys *cp = data;
2340 u16 key_count, expected_len;
2341 bool changed;
2342 int i;
2343
2344 BT_DBG("request for %s", hdev->name);
2345
2346 if (!lmp_bredr_capable(hdev))
2347 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2348 MGMT_STATUS_NOT_SUPPORTED);
2349
2350 key_count = __le16_to_cpu(cp->key_count);
2351
2352 expected_len = sizeof(*cp) + key_count *
2353 sizeof(struct mgmt_link_key_info);
2354 if (expected_len != len) {
2355 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2356 expected_len, len);
2357 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2358 MGMT_STATUS_INVALID_PARAMS);
2359 }
2360
2361 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2362 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2364
2365 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2366 key_count);
2367
2368 for (i = 0; i < key_count; i++) {
2369 struct mgmt_link_key_info *key = &cp->keys[i];
2370
2371 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2372 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2373 MGMT_STATUS_INVALID_PARAMS);
2374 }
2375
2376 hci_dev_lock(hdev);
2377
2378 hci_link_keys_clear(hdev);
2379
2380 if (cp->debug_keys)
2381 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2382 else
2383 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2384
2385 if (changed)
2386 new_settings(hdev, NULL);
2387
2388 for (i = 0; i < key_count; i++) {
2389 struct mgmt_link_key_info *key = &cp->keys[i];
2390
2391 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2392 key->type, key->pin_len);
2393 }
2394
2395 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2396
2397 hci_dev_unlock(hdev);
2398
2399 return 0;
2400 }
2401
2402 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 u8 addr_type, struct sock *skip_sk)
2404 {
2405 struct mgmt_ev_device_unpaired ev;
2406
2407 bacpy(&ev.addr.bdaddr, bdaddr);
2408 ev.addr.type = addr_type;
2409
2410 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2411 skip_sk);
2412 }
2413
2414 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2415 u16 len)
2416 {
2417 struct mgmt_cp_unpair_device *cp = data;
2418 struct mgmt_rp_unpair_device rp;
2419 struct hci_cp_disconnect dc;
2420 struct pending_cmd *cmd;
2421 struct hci_conn *conn;
2422 int err;
2423
2424 memset(&rp, 0, sizeof(rp));
2425 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2426 rp.addr.type = cp->addr.type;
2427
2428 if (!bdaddr_type_is_valid(cp->addr.type))
2429 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2430 MGMT_STATUS_INVALID_PARAMS,
2431 &rp, sizeof(rp));
2432
2433 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2434 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2435 MGMT_STATUS_INVALID_PARAMS,
2436 &rp, sizeof(rp));
2437
2438 hci_dev_lock(hdev);
2439
2440 if (!hdev_is_powered(hdev)) {
2441 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2442 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2443 goto unlock;
2444 }
2445
2446 if (cp->addr.type == BDADDR_BREDR) {
2447 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2448 } else {
2449 u8 addr_type;
2450
2451 if (cp->addr.type == BDADDR_LE_PUBLIC)
2452 addr_type = ADDR_LE_DEV_PUBLIC;
2453 else
2454 addr_type = ADDR_LE_DEV_RANDOM;
2455
2456 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2457
2458 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2459
2460 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2461 }
2462
2463 if (err < 0) {
2464 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2466 goto unlock;
2467 }
2468
2469 if (cp->disconnect) {
2470 if (cp->addr.type == BDADDR_BREDR)
2471 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2472 &cp->addr.bdaddr);
2473 else
2474 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2475 &cp->addr.bdaddr);
2476 } else {
2477 conn = NULL;
2478 }
2479
2480 if (!conn) {
2481 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2482 &rp, sizeof(rp));
2483 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2484 goto unlock;
2485 }
2486
2487 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2488 sizeof(*cp));
2489 if (!cmd) {
2490 err = -ENOMEM;
2491 goto unlock;
2492 }
2493
2494 dc.handle = cpu_to_le16(conn->handle);
2495 dc.reason = 0x13; /* Remote User Terminated Connection */
2496 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2497 if (err < 0)
2498 mgmt_pending_remove(cmd);
2499
2500 unlock:
2501 hci_dev_unlock(hdev);
2502 return err;
2503 }
2504
/* Disconnect (MGMT_OP_DISCONNECT) handler: tears down the ACL or LE
 * link to the given peer. Completes asynchronously - the pending
 * command is answered when the HCI disconnect finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the address the caller asked about */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not yet (or no longer) up */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2569
2570 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2571 {
2572 switch (link_type) {
2573 case LE_LINK:
2574 switch (addr_type) {
2575 case ADDR_LE_DEV_PUBLIC:
2576 return BDADDR_LE_PUBLIC;
2577
2578 default:
2579 /* Fallback to LE Random address type */
2580 return BDADDR_LE_RANDOM;
2581 }
2582
2583 default:
2584 /* Fallback to BR/EDR type */
2585 return BDADDR_BREDR;
2586 }
2587 }
2588
2589 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2590 u16 data_len)
2591 {
2592 struct mgmt_rp_get_connections *rp;
2593 struct hci_conn *c;
2594 size_t rp_len;
2595 int err;
2596 u16 i;
2597
2598 BT_DBG("");
2599
2600 hci_dev_lock(hdev);
2601
2602 if (!hdev_is_powered(hdev)) {
2603 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2604 MGMT_STATUS_NOT_POWERED);
2605 goto unlock;
2606 }
2607
2608 i = 0;
2609 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2610 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2611 i++;
2612 }
2613
2614 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2615 rp = kmalloc(rp_len, GFP_KERNEL);
2616 if (!rp) {
2617 err = -ENOMEM;
2618 goto unlock;
2619 }
2620
2621 i = 0;
2622 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2623 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2624 continue;
2625 bacpy(&rp->addr[i].bdaddr, &c->dst);
2626 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2627 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2628 continue;
2629 i++;
2630 }
2631
2632 rp->conn_count = cpu_to_le16(i);
2633
2634 /* Recalculate length in case of filtered SCO connections, etc */
2635 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2636
2637 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2638 rp_len);
2639
2640 kfree(rp);
2641
2642 unlock:
2643 hci_dev_unlock(hdev);
2644 return err;
2645 }
2646
2647 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2648 struct mgmt_cp_pin_code_neg_reply *cp)
2649 {
2650 struct pending_cmd *cmd;
2651 int err;
2652
2653 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2654 sizeof(*cp));
2655 if (!cmd)
2656 return -ENOMEM;
2657
2658 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2660 if (err < 0)
2661 mgmt_pending_remove(cmd);
2662
2663 return err;
2664 }
2665
/* PIN Code Reply (MGMT_OP_PIN_CODE_REPLY) handler: forwards the
 * user-supplied PIN to the controller. A high-security connection
 * requires a full 16-byte PIN; shorter PINs are turned into a negative
 * reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; reject anything shorter
	 * by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2725
2726 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2727 u16 len)
2728 {
2729 struct mgmt_cp_set_io_capability *cp = data;
2730
2731 BT_DBG("");
2732
2733 hci_dev_lock(hdev);
2734
2735 hdev->io_capability = cp->io_capability;
2736
2737 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2738 hdev->io_capability);
2739
2740 hci_dev_unlock(hdev);
2741
2742 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2743 0);
2744 }
2745
2746 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2747 {
2748 struct hci_dev *hdev = conn->hdev;
2749 struct pending_cmd *cmd;
2750
2751 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2752 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2753 continue;
2754
2755 if (cmd->user_data != conn)
2756 continue;
2757
2758 return cmd;
2759 }
2760
2761 return NULL;
2762 }
2763
/* Finish a pending Pair Device command with the given mgmt status,
 * detach the pairing callbacks and release the connection reference.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the reference taken when the pairing was started */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2784
2785 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2786 {
2787 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2788 struct pending_cmd *cmd;
2789
2790 cmd = find_pairing(conn);
2791 if (cmd)
2792 pairing_complete(cmd, status);
2793 }
2794
2795 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2796 {
2797 struct pending_cmd *cmd;
2798
2799 BT_DBG("status %u", status);
2800
2801 cmd = find_pairing(conn);
2802 if (!cmd)
2803 BT_DBG("Unable to find a pending command");
2804 else
2805 pairing_complete(cmd, mgmt_status(status));
2806 }
2807
2808 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2809 {
2810 struct pending_cmd *cmd;
2811
2812 BT_DBG("status %u", status);
2813
2814 if (!status)
2815 return;
2816
2817 cmd = find_pairing(conn);
2818 if (!cmd)
2819 BT_DBG("Unable to find a pending command");
2820 else
2821 pairing_complete(cmd, mgmt_status(status));
2822 }
2823
/* Pair Device (MGMT_OP_PAIR_DEVICE) handler: initiates an ACL or LE
 * connection to the peer and starts dedicated bonding on it. The
 * command completes asynchronously through the pairing callbacks
 * installed below.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* An existing callback means another pairing already owns this
	 * connection; release our reference and report busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2925
/* Cancel Pair Device (MGMT_OP_CANCEL_PAIR_DEVICE) handler: aborts the
 * pending Pair Device command for the given address, completing it with
 * a Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the connection being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2967
/* Common backend for user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives). LE addresses are
 * answered synchronously via SMP; BR/EDR responses are forwarded to the
 * controller as the given HCI command and complete asynchronously.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3037
3038 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3039 void *data, u16 len)
3040 {
3041 struct mgmt_cp_pin_code_neg_reply *cp = data;
3042
3043 BT_DBG("");
3044
3045 return user_pairing_resp(sk, hdev, &cp->addr,
3046 MGMT_OP_PIN_CODE_NEG_REPLY,
3047 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3048 }
3049
3050 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3051 u16 len)
3052 {
3053 struct mgmt_cp_user_confirm_reply *cp = data;
3054
3055 BT_DBG("");
3056
3057 if (len != sizeof(*cp))
3058 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3059 MGMT_STATUS_INVALID_PARAMS);
3060
3061 return user_pairing_resp(sk, hdev, &cp->addr,
3062 MGMT_OP_USER_CONFIRM_REPLY,
3063 HCI_OP_USER_CONFIRM_REPLY, 0);
3064 }
3065
3066 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3067 void *data, u16 len)
3068 {
3069 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3070
3071 BT_DBG("");
3072
3073 return user_pairing_resp(sk, hdev, &cp->addr,
3074 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3075 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3076 }
3077
3078 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3079 u16 len)
3080 {
3081 struct mgmt_cp_user_passkey_reply *cp = data;
3082
3083 BT_DBG("");
3084
3085 return user_pairing_resp(sk, hdev, &cp->addr,
3086 MGMT_OP_USER_PASSKEY_REPLY,
3087 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3088 }
3089
3090 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3091 void *data, u16 len)
3092 {
3093 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3094
3095 BT_DBG("");
3096
3097 return user_pairing_resp(sk, hdev, &cp->addr,
3098 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3099 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3100 }
3101
3102 static void update_name(struct hci_request *req)
3103 {
3104 struct hci_dev *hdev = req->hdev;
3105 struct hci_cp_write_local_name cp;
3106
3107 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3108
3109 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3110 }
3111
/* Completion callback for the Set Local Name HCI request: answers the
 * pending mgmt command with either the echoed name or an error status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		/* Echo the requested name back on success */
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3139
/* Set Local Name (MGMT_OP_SET_LOCAL_NAME) handler: stores the new
 * name/short name and, when powered, pushes it to the controller (local
 * name, EIR and LE scan response data).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: only the stored copy needs updating; notify other
	 * mgmt sockets directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3208
/* Read Local OOB Data (MGMT_OP_READ_LOCAL_OOB_DATA) handler: requests
 * OOB pairing data from the controller, using the extended variant when
 * Secure Connections is enabled. Completes asynchronously.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Secure Connections needs the extended (P-256) OOB data */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3256
/* Add Remote OOB Data (MGMT_OP_ADD_REMOTE_OOB_DATA) handler: stores a
 * peer's OOB pairing data. The command length distinguishes the legacy
 * (192-bit only) format from the extended one that also carries 256-bit
 * values for Secure Connections.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known size: malformed request */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3304
3305 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3306 void *data, u16 len)
3307 {
3308 struct mgmt_cp_remove_remote_oob_data *cp = data;
3309 u8 status;
3310 int err;
3311
3312 BT_DBG("%s", hdev->name);
3313
3314 hci_dev_lock(hdev);
3315
3316 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3317 if (err < 0)
3318 status = MGMT_STATUS_INVALID_PARAMS;
3319 else
3320 status = MGMT_STATUS_SUCCESS;
3321
3322 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3323 status, &cp->addr, sizeof(cp->addr));
3324
3325 hci_dev_unlock(hdev);
3326 return err;
3327 }
3328
3329 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3330 {
3331 struct pending_cmd *cmd;
3332 u8 type;
3333 int err;
3334
3335 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3336
3337 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3338 if (!cmd)
3339 return -ENOENT;
3340
3341 type = hdev->discovery.type;
3342
3343 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3344 &type, sizeof(type));
3345 mgmt_pending_remove(cmd);
3346
3347 return err;
3348 }
3349
/* Completion callback for the Start Discovery HCI request: on success
 * moves the state machine to FINDING and, for LE-based discovery,
 * schedules the scan-disable timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	/* BR/EDR inquiry stops on its own; no LE scan to disable */
	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3388
/* Start Discovery (MGMT_OP_START_DISCOVERY) handler: builds and runs
 * the HCI request for the requested discovery type - inquiry for
 * BR/EDR, an active LE scan for LE and interleaved discovery. The
 * command completes asynchronously via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scan parameters can't be changed while advertising */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3537
3538 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3539 {
3540 struct pending_cmd *cmd;
3541 int err;
3542
3543 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3544 if (!cmd)
3545 return -ENOENT;
3546
3547 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3548 &hdev->discovery.type, sizeof(hdev->discovery.type));
3549 mgmt_pending_remove(cmd);
3550
3551 return err;
3552 }
3553
3554 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3555 {
3556 BT_DBG("status %d", status);
3557
3558 hci_dev_lock(hdev);
3559
3560 if (status) {
3561 mgmt_stop_discovery_failed(hdev, status);
3562 goto unlock;
3563 }
3564
3565 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3566
3567 unlock:
3568 hci_dev_unlock(hdev);
3569 }
3570
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY) handler: cancels whatever
 * phase discovery is currently in - the inquiry or LE scan while
 * FINDING, or the outstanding remote name request while RESOLVING.
 * Completes asynchronously via stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the one discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Cancel the BR/EDR inquiry or disable the LE scan,
		 * whichever is currently running.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request outstanding: finish synchronously */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3658
/* Handle the Confirm Name mgmt command.
 *
 * User space tells us whether the name of a discovered device is already
 * known. If known, the inquiry cache entry no longer needs name resolving
 * and is removed from the resolve list; otherwise it is (re)queued for
 * remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	/* Only entries whose name state is still unknown can be confirmed */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3700
/* Handle the Block Device mgmt command: add the given address to the
 * controller's blacklist after validating the address type.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	/* Reply carries the blocked address regardless of outcome */
	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3730
/* Handle the Unblock Device mgmt command: remove the given address from
 * the controller's blacklist. A delete failure (entry not present) maps
 * to MGMT_STATUS_INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3760
/* Handle the Set Device ID mgmt command.
 *
 * Stores the Device ID (source, vendor, product, version) on the hdev and
 * refreshes the extended inquiry response so the new DID record is
 * advertised. Source values above 0x0002 are rejected.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* 0x0000 = unassigned, 0x0001 = Bluetooth SIG, 0x0002 = USB IF */
	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Push the updated DID record into the EIR data */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
3794
/* HCI request completion callback for Set Advertising: propagate failure
 * status to all pending Set Advertising commands, or on success reply with
 * the current settings and emit a New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responded socket */
	if (match.sk)
		sock_put(match.sk);
}
3815
/* Handle the Set Advertising mgmt command.
 *
 * Enables or disables LE advertising. When no HCI traffic is needed (device
 * powered off, value unchanged, or LE connections exist) only the flag is
 * toggled and user space notified; otherwise the advertising enable/disable
 * HCI commands are queued and completed via set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't race with another in-flight advertising or LE toggle */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3893
/* Handle the Set Static Address mgmt command.
 *
 * Stores the LE static random address to use while the device is powered
 * off. BDADDR_ANY disables it; any other value must be a valid static
 * random address (two most significant bits set, not BDADDR_NONE).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
3933
/* Handle the Set Scan Parameters mgmt command.
 *
 * Validates and stores the LE scan interval and window (each within
 * 0x0004-0x4000, window <= interval) and restarts the passive background
 * scan, if one is running, so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may not exceed the scan interval */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
3989
/* HCI request completion callback for Set Fast Connectable: on success
 * update the HCI_FAST_CONNECTABLE flag to match the requested value and
 * notify user space; on failure report the HCI error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4022
/* Handle the Set Fast Connectable mgmt command.
 *
 * Requires BR/EDR to be enabled, controller version >= 1.2, the device to
 * be powered and connectable. Queues the page scan parameter change and
 * completes via fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Fast connectable page scan parameters need at least BT 1.2 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the flag already matches the request */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4087
/* Queue HCI commands to (re-)enable BR/EDR page/inquiry scan according to
 * the current CONNECTABLE and DISCOVERABLE flags.
 */
static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	/* Only send the command when at least one scan mode is wanted */
	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
4107
/* HCI request completion callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error; on
 * success confirm the new settings to user space.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4139
/* Handle the Set BR/EDR mgmt command (only meaningful on dual-mode
 * controllers where LE is enabled).
 *
 * While powered off only the flag is toggled. While powered on, disabling
 * BR/EDR is rejected; enabling it flips the flag up front (so the LE
 * advertising data flags are generated correctly), re-enables page scan if
 * connectable, updates advertising data and completes asynchronously via
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4229
/* Handle the Set Secure Connections mgmt command.
 *
 * val may be 0x00 (off), 0x01 (on) or 0x02 (SC-only mode). While powered
 * off only the HCI_SC_ENABLED/HCI_SC_ONLY flags are toggled. While powered
 * on, the Write Secure Connections Host Support HCI command is sent and
 * HCI_SC_ONLY is adjusted according to the requested mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows testing SC on controllers without support */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both flags already match the request */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4317
/* Handle the Set Debug Keys mgmt command: toggle the HCI_DEBUG_KEYS flag
 * and, if the value changed, emit a New Settings event. No HCI traffic is
 * required for this setting.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4349
/* Handle the Set Privacy mgmt command.
 *
 * Enables or disables LE privacy; only allowed while powered off. When
 * enabled, the supplied IRK is stored and the resolvable private address is
 * marked expired so a fresh one gets generated; when disabled the IRK is
 * cleared.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy can only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4399
4400 static bool irk_is_valid(struct mgmt_irk_info *irk)
4401 {
4402 switch (irk->addr.type) {
4403 case BDADDR_LE_PUBLIC:
4404 return true;
4405
4406 case BDADDR_LE_RANDOM:
4407 /* Two most significant bits shall be set */
4408 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4409 return false;
4410 return true;
4411 }
4412
4413 return false;
4414 }
4415
4416 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4417 u16 len)
4418 {
4419 struct mgmt_cp_load_irks *cp = cp_data;
4420 u16 irk_count, expected_len;
4421 int i, err;
4422
4423 BT_DBG("request for %s", hdev->name);
4424
4425 if (!lmp_le_capable(hdev))
4426 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4427 MGMT_STATUS_NOT_SUPPORTED);
4428
4429 irk_count = __le16_to_cpu(cp->irk_count);
4430
4431 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4432 if (expected_len != len) {
4433 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4434 expected_len, len);
4435 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4436 MGMT_STATUS_INVALID_PARAMS);
4437 }
4438
4439 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4440
4441 for (i = 0; i < irk_count; i++) {
4442 struct mgmt_irk_info *key = &cp->irks[i];
4443
4444 if (!irk_is_valid(key))
4445 return cmd_status(sk, hdev->id,
4446 MGMT_OP_LOAD_IRKS,
4447 MGMT_STATUS_INVALID_PARAMS);
4448 }
4449
4450 hci_dev_lock(hdev);
4451
4452 hci_smp_irks_clear(hdev);
4453
4454 for (i = 0; i < irk_count; i++) {
4455 struct mgmt_irk_info *irk = &cp->irks[i];
4456 u8 addr_type;
4457
4458 if (irk->addr.type == BDADDR_LE_PUBLIC)
4459 addr_type = ADDR_LE_DEV_PUBLIC;
4460 else
4461 addr_type = ADDR_LE_DEV_RANDOM;
4462
4463 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4464 BDADDR_ANY);
4465 }
4466
4467 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4468
4469 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4470
4471 hci_dev_unlock(hdev);
4472
4473 return err;
4474 }
4475
4476 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4477 {
4478 if (key->master != 0x00 && key->master != 0x01)
4479 return false;
4480
4481 switch (key->addr.type) {
4482 case BDADDR_LE_PUBLIC:
4483 return true;
4484
4485 case BDADDR_LE_RANDOM:
4486 /* Two most significant bits shall be set */
4487 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4488 return false;
4489 return true;
4490 }
4491
4492 return false;
4493 }
4494
4495 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4496 void *cp_data, u16 len)
4497 {
4498 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4499 u16 key_count, expected_len;
4500 int i, err;
4501
4502 BT_DBG("request for %s", hdev->name);
4503
4504 if (!lmp_le_capable(hdev))
4505 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4506 MGMT_STATUS_NOT_SUPPORTED);
4507
4508 key_count = __le16_to_cpu(cp->key_count);
4509
4510 expected_len = sizeof(*cp) + key_count *
4511 sizeof(struct mgmt_ltk_info);
4512 if (expected_len != len) {
4513 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4514 expected_len, len);
4515 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4516 MGMT_STATUS_INVALID_PARAMS);
4517 }
4518
4519 BT_DBG("%s key_count %u", hdev->name, key_count);
4520
4521 for (i = 0; i < key_count; i++) {
4522 struct mgmt_ltk_info *key = &cp->keys[i];
4523
4524 if (!ltk_is_valid(key))
4525 return cmd_status(sk, hdev->id,
4526 MGMT_OP_LOAD_LONG_TERM_KEYS,
4527 MGMT_STATUS_INVALID_PARAMS);
4528 }
4529
4530 hci_dev_lock(hdev);
4531
4532 hci_smp_ltks_clear(hdev);
4533
4534 for (i = 0; i < key_count; i++) {
4535 struct mgmt_ltk_info *key = &cp->keys[i];
4536 u8 type, addr_type;
4537
4538 if (key->addr.type == BDADDR_LE_PUBLIC)
4539 addr_type = ADDR_LE_DEV_PUBLIC;
4540 else
4541 addr_type = ADDR_LE_DEV_RANDOM;
4542
4543 if (key->master)
4544 type = HCI_SMP_LTK;
4545 else
4546 type = HCI_SMP_LTK_SLAVE;
4547
4548 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4549 key->type, key->val, key->enc_size, key->ediv,
4550 key->rand);
4551 }
4552
4553 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4554 NULL, 0);
4555
4556 hci_dev_unlock(hdev);
4557
4558 return err;
4559 }
4560
/* Context passed through mgmt_pending_foreach() when completing Get Conn
 * Info requests for a specific connection.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the reply is for */
	bool valid_tx_power;		/* TX power values in conn are usable */
	u8 mgmt_status;			/* status to report to user space */
};
4566
/* mgmt_pending_foreach() callback: complete one pending Get Conn Info
 * command if it targets the connection in *data, replying with RSSI and
 * (current/max) TX power, or HCI_TX_POWER_INVALID when the TX power read
 * failed. Drops the connection reference taken in get_conn_info().
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only complete commands that refer to this connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4602
/* HCI request completion callback for the RSSI / TX power refresh issued
 * from get_conn_info(). Resolves the connection handle from the last sent
 * command and completes all pending Get Conn Info requests for it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4660
/* Handle the Get Connection Information mgmt command.
 *
 * Returns RSSI plus current and maximum TX power for a connected device.
 * Values are cached on the hci_conn; if the cache is fresh they are
 * returned immediately, otherwise Read RSSI (and, where needed, Read
 * Transmit Power Level) HCI commands are queued and the reply is deferred
 * to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp + conn_info_age) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			/* type 0x00 = current transmit power level */
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			/* type 0x01 = maximum transmit power level */
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until the refresh completes; dropped
		 * in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4771
/* Table of mgmt command handlers, indexed directly by mgmt opcode.
 * Entry 0 is unused since opcode 0x0000 is not a valid command.
 *
 * For fixed-size commands var_len is false and the request length must
 * equal data_len exactly; for variable-length commands var_len is true
 * and data_len is the minimum acceptable length (see mgmt_control).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
};
4829
4830
/* Entry point for mgmt commands received on a management socket.
 *
 * Copies the message into kernel memory, parses the mgmt_hdr, validates
 * the controller index and the per-command parameter length, and then
 * dispatches to the matching entry in mgmt_handlers[].
 *
 * Returns the number of consumed bytes on success, a negative errno on
 * local failure, or the handler's negative error code.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must account for the entire payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user channel
		 * are not accessible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below MGMT_OP_READ_INFO are global and must not carry
	 * a controller index; all other commands require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly; variable-length ones
	 * must provide at least their minimum size.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* On success report the whole message as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4923
4924 void mgmt_index_added(struct hci_dev *hdev)
4925 {
4926 if (hdev->dev_type != HCI_BREDR)
4927 return;
4928
4929 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4930 }
4931
4932 void mgmt_index_removed(struct hci_dev *hdev)
4933 {
4934 u8 status = MGMT_STATUS_INVALID_INDEX;
4935
4936 if (hdev->dev_type != HCI_BREDR)
4937 return;
4938
4939 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4940
4941 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4942 }
4943
4944 /* This function requires the caller holds hdev->lock */
4945 static void restart_le_auto_conns(struct hci_dev *hdev)
4946 {
4947 struct hci_conn_params *p;
4948
4949 list_for_each_entry(p, &hdev->le_conn_params, list) {
4950 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4951 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4952 }
4953 }
4954
/* Request completion callback for the HCI commands queued by
 * powered_update_hci() when a controller is powered on.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Re-arm background connections for auto-connect devices */
	restart_le_auto_conns(hdev);

	/* Answer every pending Set Powered command with the settings */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp held a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
4974
/* Queue the HCI commands needed to bring the controller in line with
 * the stored mgmt settings after power on.
 *
 * Returns the result of hci_req_run(): 0 means commands were queued
 * and powered_complete() will be called when they finish; a negative
 * value means nothing needed to be sent (or queuing failed).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP in the controller if requested but not yet active */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication-enable setting with the
	 * stored link security flag.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5034
/* Notify the mgmt layer of a controller power state change.
 *
 * Power on: first try to sync HCI state via powered_update_hci(); if
 * commands were queued (return 0), responses and the settings event are
 * deferred to powered_complete(). Power off: fail all still-pending
 * commands with Not Powered and signal a zero class of device if one
 * was set.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5069
5070 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5071 {
5072 struct pending_cmd *cmd;
5073 u8 status;
5074
5075 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5076 if (!cmd)
5077 return;
5078
5079 if (err == -ERFKILL)
5080 status = MGMT_STATUS_RFKILLED;
5081 else
5082 status = MGMT_STATUS_FAILED;
5083
5084 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5085
5086 mgmt_pending_remove(cmd);
5087 }
5088
/* Timer callback fired when a time-limited discoverable mode expires.
 * Clears the discoverable flags, restores plain page scan (BR/EDR),
 * refreshes class and advertising data, and emits a settings event.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5119
5120 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5121 {
5122 bool changed;
5123
5124 /* Nothing needed here if there's a pending command since that
5125 * commands request completion callback takes care of everything
5126 * necessary.
5127 */
5128 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5129 return;
5130
5131 /* Powering off may clear the scan mode - don't let that interfere */
5132 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5133 return;
5134
5135 if (discoverable) {
5136 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5137 } else {
5138 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5139 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5140 }
5141
5142 if (changed) {
5143 struct hci_request req;
5144
5145 /* In case this change in discoverable was triggered by
5146 * a disabling of connectable there could be a need to
5147 * update the advertising flags.
5148 */
5149 hci_req_init(&req, hdev);
5150 update_adv_data(&req);
5151 hci_req_run(&req, NULL);
5152
5153 new_settings(hdev, NULL);
5154 }
5155 }
5156
5157 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5158 {
5159 bool changed;
5160
5161 /* Nothing needed here if there's a pending command since that
5162 * commands request completion callback takes care of everything
5163 * necessary.
5164 */
5165 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5166 return;
5167
5168 /* Powering off may clear the scan mode - don't let that interfere */
5169 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5170 return;
5171
5172 if (connectable)
5173 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5174 else
5175 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5176
5177 if (changed)
5178 new_settings(hdev, NULL);
5179 }
5180
5181 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5182 {
5183 /* Powering off may stop advertising - don't let that interfere */
5184 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5185 return;
5186
5187 if (advertising)
5188 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5189 else
5190 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5191 }
5192
5193 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5194 {
5195 u8 mgmt_err = mgmt_status(status);
5196
5197 if (scan & SCAN_PAGE)
5198 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5199 cmd_status_rsp, &mgmt_err);
5200
5201 if (scan & SCAN_INQUIRY)
5202 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5203 cmd_status_rsp, &mgmt_err);
5204 }
5205
5206 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5207 bool persistent)
5208 {
5209 struct mgmt_ev_new_link_key ev;
5210
5211 memset(&ev, 0, sizeof(ev));
5212
5213 ev.store_hint = persistent;
5214 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5215 ev.key.addr.type = BDADDR_BREDR;
5216 ev.key.type = key->type;
5217 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5218 ev.key.pin_len = key->pin_len;
5219
5220 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5221 }
5222
/* Send a New Long Term Key event for a freshly distributed LE LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Flag the key as a master LTK where applicable */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5260
5261 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5262 {
5263 struct mgmt_ev_new_irk ev;
5264
5265 memset(&ev, 0, sizeof(ev));
5266
5267 /* For identity resolving keys from devices that are already
5268 * using a public address or static random address, do not
5269 * ask for storing this key. The identity resolving key really
5270 * is only mandatory for devices using resovlable random
5271 * addresses.
5272 *
5273 * Storing all identity resolving keys has the downside that
5274 * they will be also loaded on next boot of they system. More
5275 * identity resolving keys, means more time during scanning is
5276 * needed to actually resolve these addresses.
5277 */
5278 if (bacmp(&irk->rpa, BDADDR_ANY))
5279 ev.store_hint = 0x01;
5280 else
5281 ev.store_hint = 0x00;
5282
5283 bacpy(&ev.rpa, &irk->rpa);
5284 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5285 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5286 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5287
5288 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5289 }
5290
5291 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5292 bool persistent)
5293 {
5294 struct mgmt_ev_new_csrk ev;
5295
5296 memset(&ev, 0, sizeof(ev));
5297
5298 /* Devices using resolvable or non-resolvable random addresses
5299 * without providing an indentity resolving key don't require
5300 * to store signature resolving keys. Their addresses will change
5301 * the next time around.
5302 *
5303 * Only when a remote device provides an identity address
5304 * make sure the signature resolving key is stored. So allow
5305 * static random and public addresses here.
5306 */
5307 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5308 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5309 ev.store_hint = 0x00;
5310 else
5311 ev.store_hint = persistent;
5312
5313 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5314 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5315 ev.key.master = csrk->master;
5316 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5317
5318 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5319 }
5320
5321 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5322 u8 data_len)
5323 {
5324 eir[eir_len++] = sizeof(type) + data_len;
5325 eir[eir_len++] = type;
5326 memcpy(&eir[eir_len], data, data_len);
5327 eir_len += data_len;
5328
5329 return eir_len;
5330 }
5331
/* Send a Device Connected event, appending the remote name and the
 * class of device as EIR fields when available.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* NOTE(review): no explicit bounds check against buf; assumes
	 * name_len is capped by the callers (HCI name length limit) so
	 * header + name + 5-byte CoD field always fit in 512 bytes —
	 * confirm against call sites.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append a class of device that is actually set */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5358
5359 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5360 {
5361 struct mgmt_cp_disconnect *cp = cmd->param;
5362 struct sock **sk = data;
5363 struct mgmt_rp_disconnect rp;
5364
5365 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5366 rp.addr.type = cp->addr.type;
5367
5368 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5369 sizeof(rp));
5370
5371 *sk = cmd->sk;
5372 sock_hold(*sk);
5373
5374 mgmt_pending_remove(cmd);
5375 }
5376
5377 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5378 {
5379 struct hci_dev *hdev = data;
5380 struct mgmt_cp_unpair_device *cp = cmd->param;
5381 struct mgmt_rp_unpair_device rp;
5382
5383 memset(&rp, 0, sizeof(rp));
5384 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5385 rp.addr.type = cp->addr.type;
5386
5387 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5388
5389 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5390
5391 mgmt_pending_remove(cmd);
5392 }
5393
/* Handle a remote device disconnection: possibly finish a pending
 * power off, complete pending Disconnect/Unpair commands and send a
 * Device Disconnected event (skipping the socket that requested the
 * disconnect).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only devices that were reported connected to mgmt get a
	 * disconnection event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requesting socket in sk */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5435
5436 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5437 u8 link_type, u8 addr_type, u8 status)
5438 {
5439 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5440 struct mgmt_cp_disconnect *cp;
5441 struct mgmt_rp_disconnect rp;
5442 struct pending_cmd *cmd;
5443
5444 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5445 hdev);
5446
5447 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5448 if (!cmd)
5449 return;
5450
5451 cp = cmd->param;
5452
5453 if (bacmp(bdaddr, &cp->addr.bdaddr))
5454 return;
5455
5456 if (cp->addr.type != bdaddr_type)
5457 return;
5458
5459 bacpy(&rp.addr.bdaddr, bdaddr);
5460 rp.addr.type = bdaddr_type;
5461
5462 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5463 mgmt_status(status), &rp, sizeof(rp));
5464
5465 mgmt_pending_remove(cmd);
5466 }
5467
5468 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5469 u8 addr_type, u8 status)
5470 {
5471 struct mgmt_ev_connect_failed ev;
5472 struct pending_cmd *power_off;
5473
5474 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5475 if (power_off) {
5476 struct mgmt_mode *cp = power_off->param;
5477
5478 /* The connection is still in hci_conn_hash so test for 1
5479 * instead of 0 to know if this is the last one.
5480 */
5481 if (!cp->val && hci_conn_count(hdev) == 1) {
5482 cancel_delayed_work(&hdev->power_off);
5483 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5484 }
5485 }
5486
5487 bacpy(&ev.addr.bdaddr, bdaddr);
5488 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5489 ev.status = mgmt_status(status);
5490
5491 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5492 }
5493
5494 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5495 {
5496 struct mgmt_ev_pin_code_request ev;
5497
5498 bacpy(&ev.addr.bdaddr, bdaddr);
5499 ev.addr.type = BDADDR_BREDR;
5500 ev.secure = secure;
5501
5502 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5503 }
5504
5505 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5506 u8 status)
5507 {
5508 struct pending_cmd *cmd;
5509 struct mgmt_rp_pin_code_reply rp;
5510
5511 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5512 if (!cmd)
5513 return;
5514
5515 bacpy(&rp.addr.bdaddr, bdaddr);
5516 rp.addr.type = BDADDR_BREDR;
5517
5518 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5519 mgmt_status(status), &rp, sizeof(rp));
5520
5521 mgmt_pending_remove(cmd);
5522 }
5523
5524 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5525 u8 status)
5526 {
5527 struct pending_cmd *cmd;
5528 struct mgmt_rp_pin_code_reply rp;
5529
5530 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5531 if (!cmd)
5532 return;
5533
5534 bacpy(&rp.addr.bdaddr, bdaddr);
5535 rp.addr.type = BDADDR_BREDR;
5536
5537 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5538 mgmt_status(status), &rp, sizeof(rp));
5539
5540 mgmt_pending_remove(cmd);
5541 }
5542
5543 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5544 u8 link_type, u8 addr_type, u32 value,
5545 u8 confirm_hint)
5546 {
5547 struct mgmt_ev_user_confirm_request ev;
5548
5549 BT_DBG("%s", hdev->name);
5550
5551 bacpy(&ev.addr.bdaddr, bdaddr);
5552 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5553 ev.confirm_hint = confirm_hint;
5554 ev.value = cpu_to_le32(value);
5555
5556 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5557 NULL);
5558 }
5559
5560 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5561 u8 link_type, u8 addr_type)
5562 {
5563 struct mgmt_ev_user_passkey_request ev;
5564
5565 BT_DBG("%s", hdev->name);
5566
5567 bacpy(&ev.addr.bdaddr, bdaddr);
5568 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5569
5570 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5571 NULL);
5572 }
5573
5574 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5575 u8 link_type, u8 addr_type, u8 status,
5576 u8 opcode)
5577 {
5578 struct pending_cmd *cmd;
5579 struct mgmt_rp_user_confirm_reply rp;
5580 int err;
5581
5582 cmd = mgmt_pending_find(opcode, hdev);
5583 if (!cmd)
5584 return -ENOENT;
5585
5586 bacpy(&rp.addr.bdaddr, bdaddr);
5587 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5588 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5589 &rp, sizeof(rp));
5590
5591 mgmt_pending_remove(cmd);
5592
5593 return err;
5594 }
5595
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5602
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5610
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5617
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5625
5626 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5627 u8 link_type, u8 addr_type, u32 passkey,
5628 u8 entered)
5629 {
5630 struct mgmt_ev_passkey_notify ev;
5631
5632 BT_DBG("%s", hdev->name);
5633
5634 bacpy(&ev.addr.bdaddr, bdaddr);
5635 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5636 ev.passkey = __cpu_to_le32(passkey);
5637 ev.entered = entered;
5638
5639 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5640 }
5641
5642 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5643 u8 addr_type, u8 status)
5644 {
5645 struct mgmt_ev_auth_failed ev;
5646
5647 bacpy(&ev.addr.bdaddr, bdaddr);
5648 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5649 ev.status = mgmt_status(status);
5650
5651 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5652 }
5653
/* Completion handler for HCI Write Authentication Enable: sync the
 * link security flag, answer pending Set Link Security commands and
 * emit a settings event when the flag changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5682
/* Drop the cached EIR data and queue an HCI write of an all-zero EIR.
 * No-op on controllers without extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
5697
/* Completion handler for HCI Write Simple Pairing Mode: sync the SSP
 * (and dependent high-speed) flags, answer pending Set SSP commands,
 * emit a settings event on change, and refresh or clear the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set flag
		 * (HS depends on SSP, so it is dropped as well).
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Report a change if either SSP or HS was cleared */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* EIR data is only meaningful while SSP is enabled */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5746
/* Completion handler for enabling/disabling Secure Connections: sync
 * the SC (and SC-only) flags, answer pending Set Secure Connections
 * commands and emit a settings event on change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set
		 * flags (SC-only mode requires SC).
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5783
5784 static void sk_lookup(struct pending_cmd *cmd, void *data)
5785 {
5786 struct cmd_lookup *match = data;
5787
5788 if (match->sk == NULL) {
5789 match->sk = cmd->sk;
5790 sock_hold(match->sk);
5791 }
5792 }
5793
/* Completion handler for a class of device update: pick up the socket
 * of any pending class-related command and, on success, broadcast the
 * new class of device.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* sk_lookup held a reference on the captured socket */
	if (match.sk)
		sock_put(match.sk);
}
5810
/* Completion handler for a local name update: store the name and send
 * a Local Name Changed event, unless the write is part of the power-on
 * sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No Set Local Name pending, so the controller-side name
		 * was written by the kernel itself - cache it.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5837
/* Completion handler for reading local out-of-band pairing data.
 *
 * When Secure Connections is enabled and the controller provided
 * 256-bit values as well, the extended reply (192- and 256-bit hash
 * and randomizer) is returned; otherwise only the legacy 192-bit
 * values are sent.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5884
/* Emit a mgmt Device Found event for a device seen while discovery is
 * active. EIR data, an optional Class of Device field and scan response
 * data are packed into one variable-length event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Reports outside of an active discovery session are dropped */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If an IRK matches this address, report the identity address
	 * instead of the (resolvable) address the device was seen with.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only if the EIR data did not
	 * already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data follows directly after the EIR data */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5936
5937 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5938 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5939 {
5940 struct mgmt_ev_device_found *ev;
5941 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5942 u16 eir_len;
5943
5944 ev = (struct mgmt_ev_device_found *) buf;
5945
5946 memset(buf, 0, sizeof(buf));
5947
5948 bacpy(&ev->addr.bdaddr, bdaddr);
5949 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5950 ev->rssi = rssi;
5951
5952 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5953 name_len);
5954
5955 ev->eir_len = cpu_to_le16(eir_len);
5956
5957 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5958 }
5959
5960 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5961 {
5962 struct mgmt_ev_discovering ev;
5963 struct pending_cmd *cmd;
5964
5965 BT_DBG("%s discovering %u", hdev->name, discovering);
5966
5967 if (discovering)
5968 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5969 else
5970 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5971
5972 if (cmd != NULL) {
5973 u8 type = hdev->discovery.type;
5974
5975 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5976 sizeof(type));
5977 mgmt_pending_remove(cmd);
5978 }
5979
5980 memset(&ev, 0, sizeof(ev));
5981 ev.type = hdev->discovery.type;
5982 ev.discovering = discovering;
5983
5984 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5985 }
5986
5987 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5988 {
5989 struct pending_cmd *cmd;
5990 struct mgmt_ev_device_blocked ev;
5991
5992 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5993
5994 bacpy(&ev.addr.bdaddr, bdaddr);
5995 ev.addr.type = type;
5996
5997 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5998 cmd ? cmd->sk : NULL);
5999 }
6000
6001 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6002 {
6003 struct pending_cmd *cmd;
6004 struct mgmt_ev_device_unblocked ev;
6005
6006 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6007
6008 bacpy(&ev.addr.bdaddr, bdaddr);
6009 ev.addr.type = type;
6010
6011 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6012 cmd ? cmd->sk : NULL);
6013 }
6014
6015 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6016 {
6017 BT_DBG("%s status %u", hdev->name, status);
6018
6019 /* Clear the advertising mgmt setting if we failed to re-enable it */
6020 if (status) {
6021 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6022 new_settings(hdev, NULL);
6023 }
6024 }
6025
6026 void mgmt_reenable_advertising(struct hci_dev *hdev)
6027 {
6028 struct hci_request req;
6029
6030 if (hci_conn_num(hdev, LE_LINK) > 0)
6031 return;
6032
6033 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6034 return;
6035
6036 hci_req_init(&req, hdev);
6037 enable_advertising(&req);
6038
6039 /* If this fails we have no option but to let user space know
6040 * that we've disabled advertising.
6041 */
6042 if (hci_req_run(&req, adv_enable_complete) < 0) {
6043 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6044 new_settings(hdev, NULL);
6045 }
6046 }
This page took 0.164851 seconds and 5 git commands to generate.