766411e11ac7b24d10c126076bb18954d31b757d
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Commands implemented by this mgmt interface revision; the list is
 * returned verbatim by the Read Commands handler (read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
};
95
/* Events this interface can emit; returned alongside mgmt_commands[]
 * by the Read Commands handler (read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
};
126
/* NOTE(review): 2 s timeout, presumably for cached discovery data —
 * the user is outside this chunk, confirm against the rest of the file.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered from mgmt's point of view: the transport is up (HCI_UP) and
 * not merely auto-powered during setup (HCI_AUTO_OFF still set).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
131
/* A mgmt command whose HCI processing has not yet completed */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* being processed */
	int index;		/* controller id (hdev->id) */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* originating socket, reference held */
	void *user_data;	/* caller context, see mgmt_pending_find_data() */
};
140
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; any code beyond the end of
 * the table is mapped to MGMT_STATUS_FAILED by mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
205
206 static u8 mgmt_status(u8 hci_status)
207 {
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
210
211 return MGMT_STATUS_FAILED;
212 }
213
/* Send a MGMT_EV_CMD_STATUS event to one mgmt socket.
 *
 * Reports @status for command @cmd on controller @index.  Returns 0
 * on success or a negative errno; on queueing failure the skb is
 * freed here.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	/* All mgmt wire fields are little-endian */
	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
243
/* Send a MGMT_EV_CMD_COMPLETE event to one mgmt socket.
 *
 * @rp/@rp_len form the command-specific response payload appended to
 * the event; @rp may be NULL when @rp_len is 0.  Returns 0 on success
 * or a negative errno; on queueing failure the skb is freed here.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
277
278 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
279 u16 data_len)
280 {
281 struct mgmt_rp_read_version rp;
282
283 BT_DBG("sock %p", sk);
284
285 rp.version = MGMT_VERSION;
286 rp.revision = cpu_to_le16(MGMT_REVISION);
287
288 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
289 sizeof(rp));
290 }
291
292 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
293 u16 data_len)
294 {
295 struct mgmt_rp_read_commands *rp;
296 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
297 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 __le16 *opcode;
299 size_t rp_size;
300 int i, err;
301
302 BT_DBG("sock %p", sk);
303
304 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305
306 rp = kmalloc(rp_size, GFP_KERNEL);
307 if (!rp)
308 return -ENOMEM;
309
310 rp->num_commands = cpu_to_le16(num_commands);
311 rp->num_events = cpu_to_le16(num_events);
312
313 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
314 put_unaligned_le16(mgmt_commands[i], opcode);
315
316 for (i = 0; i < num_events; i++, opcode++)
317 put_unaligned_le16(mgmt_events[i], opcode);
318
319 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
320 rp_size);
321 kfree(rp);
322
323 return err;
324 }
325
326 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
327 u16 data_len)
328 {
329 struct mgmt_rp_read_index_list *rp;
330 struct hci_dev *d;
331 size_t rp_len;
332 u16 count;
333 int err;
334
335 BT_DBG("sock %p", sk);
336
337 read_lock(&hci_dev_list_lock);
338
339 count = 0;
340 list_for_each_entry(d, &hci_dev_list, list) {
341 if (d->dev_type == HCI_BREDR &&
342 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
343 count++;
344 }
345
346 rp_len = sizeof(*rp) + (2 * count);
347 rp = kmalloc(rp_len, GFP_ATOMIC);
348 if (!rp) {
349 read_unlock(&hci_dev_list_lock);
350 return -ENOMEM;
351 }
352
353 count = 0;
354 list_for_each_entry(d, &hci_dev_list, list) {
355 if (test_bit(HCI_SETUP, &d->dev_flags) ||
356 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
357 continue;
358
359 /* Devices marked as raw-only are neither configured
360 * nor unconfigured controllers.
361 */
362 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
363 continue;
364
365 if (d->dev_type == HCI_BREDR &&
366 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
367 rp->index[count++] = cpu_to_le16(d->id);
368 BT_DBG("Added hci%u", d->id);
369 }
370 }
371
372 rp->num_controllers = cpu_to_le16(count);
373 rp_len = sizeof(*rp) + (2 * count);
374
375 read_unlock(&hci_dev_list_lock);
376
377 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
378 rp_len);
379
380 kfree(rp);
381
382 return err;
383 }
384
385 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
386 void *data, u16 data_len)
387 {
388 struct mgmt_rp_read_unconf_index_list *rp;
389 struct hci_dev *d;
390 size_t rp_len;
391 u16 count;
392 int err;
393
394 BT_DBG("sock %p", sk);
395
396 read_lock(&hci_dev_list_lock);
397
398 count = 0;
399 list_for_each_entry(d, &hci_dev_list, list) {
400 if (d->dev_type == HCI_BREDR &&
401 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
402 count++;
403 }
404
405 rp_len = sizeof(*rp) + (2 * count);
406 rp = kmalloc(rp_len, GFP_ATOMIC);
407 if (!rp) {
408 read_unlock(&hci_dev_list_lock);
409 return -ENOMEM;
410 }
411
412 count = 0;
413 list_for_each_entry(d, &hci_dev_list, list) {
414 if (test_bit(HCI_SETUP, &d->dev_flags) ||
415 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
416 continue;
417
418 /* Devices marked as raw-only are neither configured
419 * nor unconfigured controllers.
420 */
421 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
422 continue;
423
424 if (d->dev_type == HCI_BREDR &&
425 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
426 rp->index[count++] = cpu_to_le16(d->id);
427 BT_DBG("Added hci%u", d->id);
428 }
429 }
430
431 rp->num_controllers = cpu_to_le16(count);
432 rp_len = sizeof(*rp) + (2 * count);
433
434 read_unlock(&hci_dev_list_lock);
435
436 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
437 0, rp, rp_len);
438
439 kfree(rp);
440
441 return err;
442 }
443
444 static __le32 get_missing_options(struct hci_dev *hdev)
445 {
446 u32 options = 0;
447
448 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
449 !bacmp(&hdev->public_addr, BDADDR_ANY))
450 options |= MGMT_OPTION_PUBLIC_ADDRESS;
451
452 return cpu_to_le32(options);
453 }
454
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus the
 * supported and still-missing configuration options for @hdev.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Configuring the public address requires a driver set_bdaddr
	 * callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
479
/* Compute the MGMT_SETTING_* bits this controller could support,
 * derived from its LMP features and HCI version.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Available on every controller regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		/* Fast connectable requires HCI version 1.2 or later */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* Secure Connections may also be force-enabled via the
		 * debugfs flag even without controller support.
		 */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	/* Public address configuration needs driver support */
	if (hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
517
518 static u32 get_current_settings(struct hci_dev *hdev)
519 {
520 u32 settings = 0;
521
522 if (hdev_is_powered(hdev))
523 settings |= MGMT_SETTING_POWERED;
524
525 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
526 settings |= MGMT_SETTING_CONNECTABLE;
527
528 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
529 settings |= MGMT_SETTING_FAST_CONNECTABLE;
530
531 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
532 settings |= MGMT_SETTING_DISCOVERABLE;
533
534 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
535 settings |= MGMT_SETTING_PAIRABLE;
536
537 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
538 settings |= MGMT_SETTING_BREDR;
539
540 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
541 settings |= MGMT_SETTING_LE;
542
543 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
544 settings |= MGMT_SETTING_LINK_SECURITY;
545
546 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
547 settings |= MGMT_SETTING_SSP;
548
549 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
550 settings |= MGMT_SETTING_HS;
551
552 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
553 settings |= MGMT_SETTING_ADVERTISING;
554
555 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
556 settings |= MGMT_SETTING_SECURE_CONN;
557
558 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
559 settings |= MGMT_SETTING_DEBUG_KEYS;
560
561 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
562 settings |= MGMT_SETTING_PRIVACY;
563
564 return settings;
565 }
566
/* Device ID (PnP Information) service class; excluded from the EIR
 * 16-bit UUID list in create_uuid16_list().
 */
#define PNP_INFO_SVCLASS_ID 0x1200
568
/* Append an EIR field listing 16-bit service UUIDs to @data.
 *
 * @len is the space remaining in the EIR buffer.  Starts out as an
 * EIR_UUID16_ALL field and is downgraded to EIR_UUID16_SOME if the
 * buffer fills before every UUID fits.  Returns the new write
 * position (unchanged if there was no room or nothing to write).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
610
/* Append an EIR field listing 32-bit service UUIDs to @data.
 *
 * Same scheme as create_uuid16_list(): EIR_UUID32_ALL, downgraded to
 * EIR_UUID32_SOME on overflow.  Returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
643
/* Append an EIR field listing 128-bit service UUIDs to @data.
 *
 * Same scheme as create_uuid16_list(): EIR_UUID128_ALL, downgraded to
 * EIR_UUID128_SOME on overflow.  Returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
676
677 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
678 {
679 struct pending_cmd *cmd;
680
681 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
682 if (cmd->opcode == opcode)
683 return cmd;
684 }
685
686 return NULL;
687 }
688
689 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
690 struct hci_dev *hdev,
691 const void *data)
692 {
693 struct pending_cmd *cmd;
694
695 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
696 if (cmd->user_data != data)
697 continue;
698 if (cmd->opcode == opcode)
699 return cmd;
700 }
701
702 return NULL;
703 }
704
705 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
706 {
707 u8 ad_len = 0;
708 size_t name_len;
709
710 name_len = strlen(hdev->dev_name);
711 if (name_len > 0) {
712 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
713
714 if (name_len > max_len) {
715 name_len = max_len;
716 ptr[1] = EIR_NAME_SHORT;
717 } else
718 ptr[1] = EIR_NAME_COMPLETE;
719
720 ptr[0] = name_len + 1;
721
722 memcpy(ptr + 2, hdev->dev_name, name_len);
723
724 ad_len += (name_len + 2);
725 ptr += (name_len + 2);
726 }
727
728 return ad_len;
729 }
730
/* Queue an HCI Set Scan Response Data command if the freshly computed
 * data differs from what the controller already has.  No-op unless LE
 * is enabled.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the new data so later calls can compare against it */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
755
756 static u8 get_adv_discov_flags(struct hci_dev *hdev)
757 {
758 struct pending_cmd *cmd;
759
760 /* If there's a pending mgmt command the flags will not yet have
761 * their final values, so check for this first.
762 */
763 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
764 if (cmd) {
765 struct mgmt_mode *cp = cmd->param;
766 if (cp->val == 0x01)
767 return LE_AD_GENERAL;
768 else if (cp->val == 0x02)
769 return LE_AD_LIMITED;
770 } else {
771 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
772 return LE_AD_LIMITED;
773 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
774 return LE_AD_GENERAL;
775 }
776
777 return 0;
778 }
779
/* Build LE advertising data for @hdev into @ptr: a flags field
 * (discoverability and BR/EDR support) when any flag is set, and the
 * TX power when known.  Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
811
/* Queue an HCI Set Advertising Data command if the freshly computed
 * data differs from what the controller already has.  No-op unless LE
 * is enabled.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the new data so later calls can compare against it */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
836
/* Build the Extended Inquiry Response payload for @hdev into @data
 * (HCI_MAX_EIR_LENGTH bytes, pre-zeroed by the caller): local name,
 * TX power, Device ID and the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version (8 bytes) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
884
/* Queue a Write Extended Inquiry Response command if the computed EIR
 * differs from the cached copy.  Requires a powered controller with
 * EIR capability and SSP enabled; deferred while the service cache is
 * still active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
913
914 static u8 get_service_classes(struct hci_dev *hdev)
915 {
916 struct bt_uuid *uuid;
917 u8 val = 0;
918
919 list_for_each_entry(uuid, &hdev->uuids, list)
920 val |= uuid->svc_hint;
921
922 return val;
923 }
924
/* Queue a Write Class of Device command if the computed class differs
 * from the current one.  Skipped while powered off, while BR/EDR is
 * disabled, or while the service cache is still active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode is signalled via bit 5 of the
	 * major class byte.
	 */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the command when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
953
954 static bool get_connectable(struct hci_dev *hdev)
955 {
956 struct pending_cmd *cmd;
957
958 /* If there's a pending mgmt command the flag will not yet have
959 * it's final value, so check for this first.
960 */
961 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
962 if (cmd) {
963 struct mgmt_mode *cp = cmd->param;
964 return cp->val;
965 }
966
967 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
968 }
969
/* Queue the HCI commands that (re)start LE advertising: set the
 * advertising parameters, then enable advertising.  Bails out without
 * queueing anything if a suitable own address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 = 1.28 s advertising interval (min == max) */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1004
/* Queue the HCI command that turns LE advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1011
/* Delayed work: expire the service cache and push the now up-to-date
 * EIR and Class of Device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache was already cleared */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1032
/* Delayed work: mark the resolvable private address as expired and,
 * when advertising and not in an LE connection, restart advertising
 * so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Only cycle advertising; an active LE connection keeps the
	 * current address in use.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
1058
/* One-time per-controller mgmt initialization, performed when the
 * first mgmt command touches @hdev.  Idempotent via the HCI_MGMT bit.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
1074
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * supported/current settings, class of device and names of @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1104
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and descriptor.
 * The caller must already have unlinked @cmd from the pending list.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1111
1112 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1113 struct hci_dev *hdev, void *data,
1114 u16 len)
1115 {
1116 struct pending_cmd *cmd;
1117
1118 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1119 if (!cmd)
1120 return NULL;
1121
1122 cmd->opcode = opcode;
1123 cmd->index = hdev->id;
1124
1125 cmd->param = kmalloc(len, GFP_KERNEL);
1126 if (!cmd->param) {
1127 kfree(cmd);
1128 return NULL;
1129 }
1130
1131 if (data)
1132 memcpy(cmd->param, data, len);
1133
1134 cmd->sk = sk;
1135 sock_hold(sk);
1136
1137 list_add(&cmd->list, &hdev->mgmt_pending);
1138
1139 return cmd;
1140 }
1141
1142 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1143 void (*cb)(struct pending_cmd *cmd,
1144 void *data),
1145 void *data)
1146 {
1147 struct pending_cmd *cmd, *tmp;
1148
1149 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1150 if (opcode > 0 && cmd->opcode != opcode)
1151 continue;
1152
1153 cb(cmd, data);
1154 }
1155 }
1156
/* Unlink @cmd from its controller's pending list and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1162
1163 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1164 {
1165 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1166
1167 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1168 sizeof(settings));
1169 }
1170
/* HCI request completion callback for power-off cleanup: once no
 * connections remain, schedule the actual power-off work immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1180
/* Queue the HCI commands needed to abort whatever discovery activity
 * is currently in progress: inquiry or LE scan while finding, a
 * remote name request while resolving, or a passive LE scan otherwise.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop the auto-disable timer too */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1217
/* Build and run an HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan and advertising, stop any
 * discovery, and tear down every connection whether established,
 * outgoing or incoming. Returns the hci_req_run() result; -ENODATA
 * means no HCI commands were required.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect cleanly */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting accept: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1270
/* Handle the MGMT Set Powered command: power the controller on or off.
 *
 * Powering on is deferred to the power_on work item; powering off first
 * queues HCI cleanup (clean_up_hci_state) and then arms the delayed
 * power_off work. The reply is delivered through the pending command
 * once the transition completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one powered transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Controller was only up because of the auto-off grace period:
	 * cancel the scheduled auto power-off. "Powering on" in that
	 * state just means keeping it up, so report powered right away.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just return current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1336
1337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1338 struct sock *skip_sk)
1339 {
1340 struct sk_buff *skb;
1341 struct mgmt_hdr *hdr;
1342
1343 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1344 if (!skb)
1345 return -ENOMEM;
1346
1347 hdr = (void *) skb_put(skb, sizeof(*hdr));
1348 hdr->opcode = cpu_to_le16(event);
1349 if (hdev)
1350 hdr->index = cpu_to_le16(hdev->id);
1351 else
1352 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1353 hdr->len = cpu_to_le16(data_len);
1354
1355 if (data)
1356 memcpy(skb_put(skb, data_len), data, data_len);
1357
1358 /* Time stamp */
1359 __net_timestamp(skb);
1360
1361 hci_send_to_control(skb, skip_sk);
1362 kfree_skb(skb);
1363
1364 return 0;
1365 }
1366
1367 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1368 {
1369 __le32 ev;
1370
1371 ev = cpu_to_le32(get_current_settings(hdev));
1372
1373 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1374 }
1375
/* Context handed to mgmt_pending_foreach() callbacks such as
 * settings_rsp() and cmd_status_rsp().
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket responded to (ref held), or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* NOTE(review): unused by the callbacks visible
				 * here; presumably filled by other callers */
};
1381
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, then unlink and free the entry. The
 * first socket encountered is stashed in the lookup (with an extra
 * reference) so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Keep the socket alive past mgmt_pending_free() below */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1397
/* mgmt_pending_foreach() callback: fail the pending command with the
 * mgmt status passed via @data and drop the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1405
1406 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1407 {
1408 if (!lmp_bredr_capable(hdev))
1409 return MGMT_STATUS_NOT_SUPPORTED;
1410 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1411 return MGMT_STATUS_REJECTED;
1412 else
1413 return MGMT_STATUS_SUCCESS;
1414 }
1415
1416 static u8 mgmt_le_support(struct hci_dev *hdev)
1417 {
1418 if (!lmp_le_capable(hdev))
1419 return MGMT_STATUS_NOT_SUPPORTED;
1420 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1421 return MGMT_STATUS_REJECTED;
1422 else
1423 return MGMT_STATUS_SUCCESS;
1424 }
1425
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout if one was
 * requested, reply to the pending command and broadcast New Settings
 * on an actual change.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag set optimistically
		 * in set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable mode off again */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1482
/* Handle the MGMT Set Discoverable command.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable (which requires a timeout). When powered off only the
 * setting flag is toggled; when powered, the appropriate IAC LAPs and
 * scan enable (BR/EDR) and/or advertising data (LE) are programmed and
 * the reply comes from set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Needs at least one of BR/EDR or LE enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1647
/* Queue page-scan parameter updates to toggle fast connectable mode:
 * interlaced scanning with a 160 msec interval when enabled, standard
 * scanning with the default 1.28 sec interval otherwise. Commands are
 * only queued for parameters that actually change. Requires BR/EDR
 * and a controller of at least Bluetooth 1.2.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only touch the controller when something actually changes */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1682
/* HCI request completion handler for Set Connectable: update the
 * HCI_CONNECTABLE flag according to the requested value, reply to the
 * pending command and broadcast New Settings on an actual change.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1720
1721 static int set_connectable_update_settings(struct hci_dev *hdev,
1722 struct sock *sk, u8 val)
1723 {
1724 bool changed = false;
1725 int err;
1726
1727 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1728 changed = true;
1729
1730 if (val) {
1731 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1732 } else {
1733 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1734 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1735 }
1736
1737 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1738 if (err < 0)
1739 return err;
1740
1741 if (changed)
1742 return new_settings(hdev, sk);
1743
1744 return 0;
1745 }
1746
/* Handle the MGMT Set Connectable command.
 *
 * When powered off only the setting flags change; when powered, page
 * scan (BR/EDR) and/or advertising data (LE) are reprogrammed and the
 * reply comes from set_connectable_complete(). If no HCI commands end
 * up being needed (-ENODATA) the flag-only path is used instead.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Needs at least one of BR/EDR or LE enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Discoverable mode cannot outlive connectable:
			 * stop its timeout when turning page scan off.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it reflects the new connectable mode */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* No HCI commands were needed: fall back to flags only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1841
1842 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1843 u16 len)
1844 {
1845 struct mgmt_mode *cp = data;
1846 bool changed;
1847 int err;
1848
1849 BT_DBG("request for %s", hdev->name);
1850
1851 if (cp->val != 0x00 && cp->val != 0x01)
1852 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1853 MGMT_STATUS_INVALID_PARAMS);
1854
1855 hci_dev_lock(hdev);
1856
1857 if (cp->val)
1858 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1859 else
1860 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1861
1862 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1863 if (err < 0)
1864 goto unlock;
1865
1866 if (changed)
1867 err = new_settings(hdev, sk);
1868
1869 unlock:
1870 hci_dev_unlock(hdev);
1871 return err;
1872 }
1873
/* Handle the MGMT Set Link Security command (BR/EDR authentication).
 *
 * When powered off only the HCI_LINK_SECURITY flag is toggled; when
 * powered, a Write Auth Enable HCI command is issued and the reply is
 * delivered once it completes (via the pending command).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: reply now */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1943
/* Handle the MGMT Set SSP (Secure Simple Pairing) command.
 *
 * When powered off only the flags are toggled (disabling SSP also
 * drops High Speed, which depends on it); when powered, Write SSP
 * Mode is issued and the reply follows from the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* HS requires SSP, so it must go down with it */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Serialize against in-flight SSP/HS changes */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also disables SSP debug mode if it was on
	 * (cp->val is 0x00 here, which disables debug mode).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2025
/* Handle the MGMT Set High Speed command: a flag-only toggle that
 * requires SSP to be enabled. Disabling HS is rejected while powered
 * on (only allowed when the controller is off).
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2076
/* HCI request completion handler for Set LE: answer every pending
 * Set LE command (with the error status on failure), broadcast New
 * Settings, and refresh the advertising/scan-response data when LE
 * ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* settings_rsp() stashed the first responded socket, so the
	 * broadcast skips the originator of the change.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2114
/* Handle the MGMT Set LE command: enable or disable LE host support.
 *
 * When powered off, or when the host LE setting already matches, only
 * the flags are toggled (disabling LE also drops advertising). When a
 * real change is needed a Write LE Host Supported command is issued
 * and the reply follows from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight LE/advertising changes */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2203
2204 /* This is a helper function to test for pending mgmt commands that can
2205 * cause CoD or EIR HCI commands. We can only allow one such pending
2206 * mgmt command at a time since otherwise we cannot easily track what
2207 * the current values are, will be, and based on that calculate if a new
2208 * HCI command needs to be sent and if yes with what value.
2209 */
2210 static bool pending_eir_or_class(struct hci_dev *hdev)
2211 {
2212 struct pending_cmd *cmd;
2213
2214 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2215 switch (cmd->opcode) {
2216 case MGMT_OP_ADD_UUID:
2217 case MGMT_OP_REMOVE_UUID:
2218 case MGMT_OP_SET_DEV_CLASS:
2219 case MGMT_OP_SET_POWERED:
2220 return true;
2221 }
2222 }
2223
2224 return false;
2225 }
2226
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; 16- and 32-bit UUIDs are shortened forms built on
 * top of it (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2231
2232 static u8 get_uuid_size(const u8 *uuid)
2233 {
2234 u32 val;
2235
2236 if (memcmp(uuid, bluetooth_base_uuid, 12))
2237 return 128;
2238
2239 val = get_unaligned_le32(&uuid[12]);
2240 if (val > 0xffff)
2241 return 32;
2242
2243 return 16;
2244 }
2245
/* Shared completion for class-affecting commands (Add/Remove UUID,
 * Set Device Class): reply to the pending command with the current
 * class of device and drop the entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2264
/* HCI request completion handler for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2271
/* Handle the MGMT Add UUID command: record the UUID on hdev->uuids
 * and queue HCI updates of the class of device and EIR data. If no
 * HCI commands turn out to be needed (-ENODATA), reply immediately;
 * otherwise the reply comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2329
2330 static bool enable_service_cache(struct hci_dev *hdev)
2331 {
2332 if (!hdev_is_powered(hdev))
2333 return false;
2334
2335 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2336 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2337 CACHE_TIMEOUT);
2338 return true;
2339 }
2340
2341 return false;
2342 }
2343
/* HCI request completion handler for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2350
/* Handle the MGMT Remove UUID command.
 *
 * An all-zero UUID clears the whole list (and may defer the CoD/EIR
 * update behind the service cache); otherwise every matching entry is
 * removed. HCI class and EIR updates are then queued; if none are
 * needed (-ENODATA) the reply is sent immediately, otherwise it comes
 * from remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed the CoD/EIR refresh is
		 * deferred to the cache timeout: reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2428
/* HCI request completion callback for Set Device Class: forward the
 * final status to the shared class-update completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2435
/* Handle MGMT_OP_SET_DEV_CLASS: store the new major/minor device
 * class and, when powered, push the updated Class of Device (and EIR
 * if the service cache was pending) to the controller. BR/EDR only.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with bits set outside the valid class ranges:
	 * low two minor bits and high three major bits must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they take effect on
	 * the next power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush a pending service-cache expiry; the dev lock must be
	 * dropped around the sync cancel since the work item takes the
	 * same lock itself.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued (nothing
		 * needed updating), so complete the command directly.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Track the command so set_class_complete() can reply */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2506
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the complete set of stored
 * BR/EDR link keys with the ones supplied by userspace and update
 * the keep-debug-keys policy. All parameters are validated before
 * any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the existing key store is
	 * only cleared once the whole request is known to be good.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Notify other mgmt sockets if the policy flag flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2588
2589 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2590 u8 addr_type, struct sock *skip_sk)
2591 {
2592 struct mgmt_ev_device_unpaired ev;
2593
2594 bacpy(&ev.addr.bdaddr, bdaddr);
2595 ev.addr.type = addr_type;
2596
2597 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2598 skip_sk);
2599 }
2600
2601 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2602 u16 len)
2603 {
2604 struct mgmt_cp_unpair_device *cp = data;
2605 struct mgmt_rp_unpair_device rp;
2606 struct hci_cp_disconnect dc;
2607 struct pending_cmd *cmd;
2608 struct hci_conn *conn;
2609 int err;
2610
2611 memset(&rp, 0, sizeof(rp));
2612 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2613 rp.addr.type = cp->addr.type;
2614
2615 if (!bdaddr_type_is_valid(cp->addr.type))
2616 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2617 MGMT_STATUS_INVALID_PARAMS,
2618 &rp, sizeof(rp));
2619
2620 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2621 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2622 MGMT_STATUS_INVALID_PARAMS,
2623 &rp, sizeof(rp));
2624
2625 hci_dev_lock(hdev);
2626
2627 if (!hdev_is_powered(hdev)) {
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2629 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2630 goto unlock;
2631 }
2632
2633 if (cp->addr.type == BDADDR_BREDR) {
2634 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2635 } else {
2636 u8 addr_type;
2637
2638 if (cp->addr.type == BDADDR_LE_PUBLIC)
2639 addr_type = ADDR_LE_DEV_PUBLIC;
2640 else
2641 addr_type = ADDR_LE_DEV_RANDOM;
2642
2643 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2644
2645 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2646
2647 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2648 }
2649
2650 if (err < 0) {
2651 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2652 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2653 goto unlock;
2654 }
2655
2656 if (cp->disconnect) {
2657 if (cp->addr.type == BDADDR_BREDR)
2658 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2659 &cp->addr.bdaddr);
2660 else
2661 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2662 &cp->addr.bdaddr);
2663 } else {
2664 conn = NULL;
2665 }
2666
2667 if (!conn) {
2668 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2669 &rp, sizeof(rp));
2670 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2671 goto unlock;
2672 }
2673
2674 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2675 sizeof(*cp));
2676 if (!cmd) {
2677 err = -ENOMEM;
2678 goto unlock;
2679 }
2680
2681 dc.handle = cpu_to_le16(conn->handle);
2682 dc.reason = 0x13; /* Remote User Terminated Connection */
2683 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2684 if (err < 0)
2685 mgmt_pending_remove(cmd);
2686
2687 unlock:
2688 hci_dev_unlock(hdev);
2689 return err;
2690 }
2691
/* Handle MGMT_OP_DISCONNECT: terminate an existing ACL or LE
 * connection to the given address. The final reply is sent from the
 * disconnect-complete path via the pending command added here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always carries the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	/* Track the command so the disconnect event handler can reply */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2756
2757 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2758 {
2759 switch (link_type) {
2760 case LE_LINK:
2761 switch (addr_type) {
2762 case ADDR_LE_DEV_PUBLIC:
2763 return BDADDR_LE_PUBLIC;
2764
2765 default:
2766 /* Fallback to LE Random address type */
2767 return BDADDR_LE_RANDOM;
2768 }
2769
2770 default:
2771 /* Fallback to BR/EDR type */
2772 return BDADDR_BREDR;
2773 }
2774 }
2775
2776 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2777 u16 data_len)
2778 {
2779 struct mgmt_rp_get_connections *rp;
2780 struct hci_conn *c;
2781 size_t rp_len;
2782 int err;
2783 u16 i;
2784
2785 BT_DBG("");
2786
2787 hci_dev_lock(hdev);
2788
2789 if (!hdev_is_powered(hdev)) {
2790 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2791 MGMT_STATUS_NOT_POWERED);
2792 goto unlock;
2793 }
2794
2795 i = 0;
2796 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2797 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2798 i++;
2799 }
2800
2801 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2802 rp = kmalloc(rp_len, GFP_KERNEL);
2803 if (!rp) {
2804 err = -ENOMEM;
2805 goto unlock;
2806 }
2807
2808 i = 0;
2809 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2810 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2811 continue;
2812 bacpy(&rp->addr[i].bdaddr, &c->dst);
2813 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2814 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2815 continue;
2816 i++;
2817 }
2818
2819 rp->conn_count = cpu_to_le16(i);
2820
2821 /* Recalculate length in case of filtered SCO connections, etc */
2822 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2823
2824 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2825 rp_len);
2826
2827 kfree(rp);
2828
2829 unlock:
2830 hci_dev_unlock(hdev);
2831 return err;
2832 }
2833
/* Send an HCI PIN Code Negative Reply for the given address and
 * register a pending mgmt command so the HCI completion path can
 * deliver the final reply to the requester.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2852
/* Handle MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN code
 * to the controller. A high-security pairing that did not get a full
 * 16-byte PIN is converted into a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; reject anything
	 * shorter by sending a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Track the command so the HCI completion path can reply */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2912
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairings. Purely local state, so it always completes
 * synchronously.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}
2936
2937 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2938 {
2939 struct hci_dev *hdev = conn->hdev;
2940 struct pending_cmd *cmd;
2941
2942 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2943 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2944 continue;
2945
2946 if (cmd->user_data != conn)
2947 continue;
2948
2949 return cmd;
2950 }
2951
2952 return NULL;
2953 }
2954
/* Finish a pending Pair Device command: send the reply with the
 * given status, detach all connection callbacks, drop the connection
 * reference taken when pairing started and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2975
2976 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2977 {
2978 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2979 struct pending_cmd *cmd;
2980
2981 cmd = find_pairing(conn);
2982 if (cmd)
2983 pairing_complete(cmd, status);
2984 }
2985
2986 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2987 {
2988 struct pending_cmd *cmd;
2989
2990 BT_DBG("status %u", status);
2991
2992 cmd = find_pairing(conn);
2993 if (!cmd)
2994 BT_DBG("Unable to find a pending command");
2995 else
2996 pairing_complete(cmd, mgmt_status(status));
2997 }
2998
2999 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3000 {
3001 struct pending_cmd *cmd;
3002
3003 BT_DBG("status %u", status);
3004
3005 if (!status)
3006 return;
3007
3008 cmd = find_pairing(conn);
3009 if (!cmd)
3010 BT_DBG("Unable to find a pending command");
3011 else
3012 pairing_complete(cmd, mgmt_status(status));
3013 }
3014
/* Handle MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection to
 * the target and wire up pairing callbacks so the final reply is
 * sent once pairing succeeds or fails. The pending command holds a
 * reference to the connection (dropped in pairing_complete()).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always carries the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing is in
	 * progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3132
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending Pair Device
 * command for the given address. The pairing itself is resolved with
 * MGMT_STATUS_CANCELLED before this command's own reply is sent.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must match the requested address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3174
/* Shared implementation for all user pairing responses (PIN code,
 * user confirm and passkey replies, positive and negative). LE
 * responses are routed to SMP and complete synchronously; BR/EDR
 * responses are forwarded as the given HCI command and complete via
 * the pending mgmt command added here.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	/* Track the command so the HCI completion path can reply */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3242
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request via
 * the generic user-pairing response helper.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3254
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * request via the generic user-pairing response helper.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command takes no trailing data, so len must match */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3270
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation
 * request via the generic user-pairing response helper.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3282
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the user-entered passkey
 * via the generic user-pairing response helper.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3294
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request
 * via the generic user-pairing response helper.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3306
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3316
/* HCI request completion callback for Set Local Name: send the final
 * reply (echoing the requested names on success) and release the
 * pending command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been resolved elsewhere */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3344
/* Handle MGMT_OP_SET_LOCAL_NAME: store the new local and short names
 * and, when powered, push the name into the controller's local name,
 * EIR (BR/EDR) and scan response data (LE).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off just store the names and emit the event;
	 * they are applied to the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Track the command so set_name_complete() can reply */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3413
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request the local OOB data
 * (extended variant when Secure Connections is enabled) from the
 * controller. The final reply is sent from the HCI completion path
 * via the pending command added here.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the extended command also returns
	 * the P-256 hash and randomizer.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3461
3462 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3464 {
3465 int err;
3466
3467 BT_DBG("%s ", hdev->name);
3468
3469 hci_dev_lock(hdev);
3470
3471 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3472 struct mgmt_cp_add_remote_oob_data *cp = data;
3473 u8 status;
3474
3475 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3476 cp->hash, cp->randomizer);
3477 if (err < 0)
3478 status = MGMT_STATUS_FAILED;
3479 else
3480 status = MGMT_STATUS_SUCCESS;
3481
3482 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3483 status, &cp->addr, sizeof(cp->addr));
3484 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3485 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3486 u8 status;
3487
3488 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3489 cp->hash192,
3490 cp->randomizer192,
3491 cp->hash256,
3492 cp->randomizer256);
3493 if (err < 0)
3494 status = MGMT_STATUS_FAILED;
3495 else
3496 status = MGMT_STATUS_SUCCESS;
3497
3498 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3499 status, &cp->addr, sizeof(cp->addr));
3500 } else {
3501 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3502 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3503 MGMT_STATUS_INVALID_PARAMS);
3504 }
3505
3506 hci_dev_unlock(hdev);
3507 return err;
3508 }
3509
3510 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3511 void *data, u16 len)
3512 {
3513 struct mgmt_cp_remove_remote_oob_data *cp = data;
3514 u8 status;
3515 int err;
3516
3517 BT_DBG("%s", hdev->name);
3518
3519 hci_dev_lock(hdev);
3520
3521 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3522 if (err < 0)
3523 status = MGMT_STATUS_INVALID_PARAMS;
3524 else
3525 status = MGMT_STATUS_SUCCESS;
3526
3527 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3528 status, &cp->addr, sizeof(cp->addr));
3529
3530 hci_dev_unlock(hdev);
3531 return err;
3532 }
3533
/* Resolve a failed Start Discovery attempt: reset the discovery
 * state machine and complete the pending command with the translated
 * HCI status. Returns -ENOENT when no command was pending.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	/* The reply echoes the discovery type that was requested */
	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3554
/* HCI request completion callback for Start Discovery: on success
 * move the state machine to FINDING and, for LE-based discovery,
 * schedule the delayed work that stops the LE scan after the
 * discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	/* BR/EDR inquiry ends on its own; no LE scan to disable */
	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3593
/* Handle the Start Discovery mgmt command.
 *
 * Validates power state and current discovery state, registers a
 * pending command, and builds an HCI request appropriate for the
 * requested discovery type: an Inquiry for BR/EDR, or an active LE
 * scan for LE/interleaved. The command is completed asynchronously
 * from start_discovery_complete() once the request finishes.
 *
 * Returns 0 or a negative errno; mgmt-level failures are reported to
 * the socket via cmd_status() and still return its (>= 0) result.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning parameters cannot be changed while advertising */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3742
3743 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3744 {
3745 struct pending_cmd *cmd;
3746 int err;
3747
3748 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3749 if (!cmd)
3750 return -ENOENT;
3751
3752 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3753 &hdev->discovery.type, sizeof(hdev->discovery.type));
3754 mgmt_pending_remove(cmd);
3755
3756 return err;
3757 }
3758
3759 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3760 {
3761 BT_DBG("status %d", status);
3762
3763 hci_dev_lock(hdev);
3764
3765 if (status) {
3766 mgmt_stop_discovery_failed(hdev, status);
3767 goto unlock;
3768 }
3769
3770 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3771
3772 unlock:
3773 hci_dev_unlock(hdev);
3774 }
3775
/* Handle the Stop Discovery mgmt command.
 *
 * Validates that discovery is actually running and that the requested
 * type matches the active one, then builds and runs an HCI request to
 * stop it. Completion is normally reported from
 * stop_discovery_complete(); if no HCI commands were needed the
 * command is completed synchronously here.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type to stop must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3831
3832 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3833 u16 len)
3834 {
3835 struct mgmt_cp_confirm_name *cp = data;
3836 struct inquiry_entry *e;
3837 int err;
3838
3839 BT_DBG("%s", hdev->name);
3840
3841 hci_dev_lock(hdev);
3842
3843 if (!hci_discovery_active(hdev)) {
3844 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3845 MGMT_STATUS_FAILED, &cp->addr,
3846 sizeof(cp->addr));
3847 goto failed;
3848 }
3849
3850 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3851 if (!e) {
3852 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3853 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3854 sizeof(cp->addr));
3855 goto failed;
3856 }
3857
3858 if (cp->name_known) {
3859 e->name_state = NAME_KNOWN;
3860 list_del(&e->list);
3861 } else {
3862 e->name_state = NAME_NEEDED;
3863 hci_inquiry_cache_update_resolve(hdev, e);
3864 }
3865
3866 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3867 sizeof(cp->addr));
3868
3869 failed:
3870 hci_dev_unlock(hdev);
3871 return err;
3872 }
3873
3874 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3875 u16 len)
3876 {
3877 struct mgmt_cp_block_device *cp = data;
3878 u8 status;
3879 int err;
3880
3881 BT_DBG("%s", hdev->name);
3882
3883 if (!bdaddr_type_is_valid(cp->addr.type))
3884 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3885 MGMT_STATUS_INVALID_PARAMS,
3886 &cp->addr, sizeof(cp->addr));
3887
3888 hci_dev_lock(hdev);
3889
3890 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3891 if (err < 0) {
3892 status = MGMT_STATUS_FAILED;
3893 goto done;
3894 }
3895
3896 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3897 sk);
3898 status = MGMT_STATUS_SUCCESS;
3899
3900 done:
3901 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3902 &cp->addr, sizeof(cp->addr));
3903
3904 hci_dev_unlock(hdev);
3905
3906 return err;
3907 }
3908
3909 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3910 u16 len)
3911 {
3912 struct mgmt_cp_unblock_device *cp = data;
3913 u8 status;
3914 int err;
3915
3916 BT_DBG("%s", hdev->name);
3917
3918 if (!bdaddr_type_is_valid(cp->addr.type))
3919 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3920 MGMT_STATUS_INVALID_PARAMS,
3921 &cp->addr, sizeof(cp->addr));
3922
3923 hci_dev_lock(hdev);
3924
3925 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3926 if (err < 0) {
3927 status = MGMT_STATUS_INVALID_PARAMS;
3928 goto done;
3929 }
3930
3931 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3932 sk);
3933 status = MGMT_STATUS_SUCCESS;
3934
3935 done:
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3937 &cp->addr, sizeof(cp->addr));
3938
3939 hci_dev_unlock(hdev);
3940
3941 return err;
3942 }
3943
3944 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3945 u16 len)
3946 {
3947 struct mgmt_cp_set_device_id *cp = data;
3948 struct hci_request req;
3949 int err;
3950 __u16 source;
3951
3952 BT_DBG("%s", hdev->name);
3953
3954 source = __le16_to_cpu(cp->source);
3955
3956 if (source > 0x0002)
3957 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3958 MGMT_STATUS_INVALID_PARAMS);
3959
3960 hci_dev_lock(hdev);
3961
3962 hdev->devid_source = source;
3963 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3964 hdev->devid_product = __le16_to_cpu(cp->product);
3965 hdev->devid_version = __le16_to_cpu(cp->version);
3966
3967 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3968
3969 hci_req_init(&req, hdev);
3970 update_eir(&req);
3971 hci_req_run(&req, NULL);
3972
3973 hci_dev_unlock(hdev);
3974
3975 return err;
3976 }
3977
3978 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3979 {
3980 struct cmd_lookup match = { NULL, hdev };
3981
3982 if (status) {
3983 u8 mgmt_err = mgmt_status(status);
3984
3985 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3986 cmd_status_rsp, &mgmt_err);
3987 return;
3988 }
3989
3990 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3991 &match);
3992
3993 new_settings(hdev, match.sk);
3994
3995 if (match.sk)
3996 sock_put(match.sk);
3997 }
3998
/* Handle the Set Advertising mgmt command.
 *
 * If the controller is powered off, the flag already matches, or there
 * are active LE connections, only the HCI_ADVERTISING flag is toggled
 * and the response is sent directly. Otherwise an HCI request to
 * enable/disable advertising is issued and the command completes from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another Set Advertising or Set LE is in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4076
4077 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4078 void *data, u16 len)
4079 {
4080 struct mgmt_cp_set_static_address *cp = data;
4081 int err;
4082
4083 BT_DBG("%s", hdev->name);
4084
4085 if (!lmp_le_capable(hdev))
4086 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4087 MGMT_STATUS_NOT_SUPPORTED);
4088
4089 if (hdev_is_powered(hdev))
4090 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4091 MGMT_STATUS_REJECTED);
4092
4093 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4094 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4095 return cmd_status(sk, hdev->id,
4096 MGMT_OP_SET_STATIC_ADDRESS,
4097 MGMT_STATUS_INVALID_PARAMS);
4098
4099 /* Two most significant bits shall be set */
4100 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4101 return cmd_status(sk, hdev->id,
4102 MGMT_OP_SET_STATIC_ADDRESS,
4103 MGMT_STATUS_INVALID_PARAMS);
4104 }
4105
4106 hci_dev_lock(hdev);
4107
4108 bacpy(&hdev->static_addr, &cp->bdaddr);
4109
4110 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4111
4112 hci_dev_unlock(hdev);
4113
4114 return err;
4115 }
4116
4117 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4118 void *data, u16 len)
4119 {
4120 struct mgmt_cp_set_scan_params *cp = data;
4121 __u16 interval, window;
4122 int err;
4123
4124 BT_DBG("%s", hdev->name);
4125
4126 if (!lmp_le_capable(hdev))
4127 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4128 MGMT_STATUS_NOT_SUPPORTED);
4129
4130 interval = __le16_to_cpu(cp->interval);
4131
4132 if (interval < 0x0004 || interval > 0x4000)
4133 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4134 MGMT_STATUS_INVALID_PARAMS);
4135
4136 window = __le16_to_cpu(cp->window);
4137
4138 if (window < 0x0004 || window > 0x4000)
4139 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4140 MGMT_STATUS_INVALID_PARAMS);
4141
4142 if (window > interval)
4143 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4144 MGMT_STATUS_INVALID_PARAMS);
4145
4146 hci_dev_lock(hdev);
4147
4148 hdev->le_scan_interval = interval;
4149 hdev->le_scan_window = window;
4150
4151 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4152
4153 /* If background scan is running, restart it so new parameters are
4154 * loaded.
4155 */
4156 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4157 hdev->discovery.state == DISCOVERY_STOPPED) {
4158 struct hci_request req;
4159
4160 hci_req_init(&req, hdev);
4161
4162 hci_req_add_le_scan_disable(&req);
4163 hci_req_add_le_passive_scan(&req);
4164
4165 hci_req_run(&req, NULL);
4166 }
4167
4168 hci_dev_unlock(hdev);
4169
4170 return err;
4171 }
4172
/* Completion handler for the Set Fast Connectable HCI request. On
 * success the HCI_FAST_CONNECTABLE flag is updated to match the value
 * requested by the pending command and a New Settings event is sent;
 * on failure the pending command is failed with the HCI status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* cmd->param holds the original mgmt_mode request */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4205
/* Handle the Set Fast Connectable mgmt command.
 *
 * Requires BR/EDR to be enabled, controller version >= 1.2, the
 * controller powered on and connectable. The page scan parameter
 * change is issued as an HCI request that completes through
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Fast connectable needs interlaced page scan, added in 1.2 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the flag already has the requested value */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4270
4271 static void set_bredr_scan(struct hci_request *req)
4272 {
4273 struct hci_dev *hdev = req->hdev;
4274 u8 scan = 0;
4275
4276 /* Ensure that fast connectable is disabled. This function will
4277 * not do anything if the page scan parameters are already what
4278 * they should be.
4279 */
4280 write_fast_connectable(req, false);
4281
4282 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4283 scan |= SCAN_PAGE;
4284 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4285 scan |= SCAN_INQUIRY;
4286
4287 if (scan)
4288 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4289 }
4290
/* Completion handler for the Set BR/EDR HCI request. On failure the
 * HCI_BREDR_ENABLED flag that set_bredr() optimistically set is
 * rolled back and the pending command is failed; on success the
 * settings response and a New Settings event are sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4322
/* Handle the Set BR/EDR mgmt command (dual-mode controllers only).
 *
 * While powered off the flag is toggled directly (clearing all
 * BR/EDR-only settings when disabling). While powered on, only
 * enabling is allowed; it is done via an HCI request that completes
 * through set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense when LE stays enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR invalidates all settings that
			 * only apply to BR/EDR operation.
			 */
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4412
/* Handle the Set Secure Connections mgmt command.
 *
 * Value 0x00 disables SC, 0x01 enables it, 0x02 enables SC-only mode.
 * While powered off the flags are toggled directly; while powered on
 * a Write Secure Connections Host Support command is sent and the
 * command completes from the corresponding event handler.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): HCI_SC_ONLY is flipped here, before the HCI
	 * command has completed — presumably acceptable since only the
	 * host-side flag is affected, but confirm against the command
	 * complete handler.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4500
/* Handle the Set Debug Keys mgmt command.
 *
 * Value 0x00 disables keeping debug keys, 0x01 keeps them, 0x02
 * additionally makes the controller generate debug keys (SSP debug
 * mode). The SSP debug mode command is only sent when powered on with
 * SSP enabled and the use-flag actually changed.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Track whether the keep-debug-keys setting changed */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Track whether the use-debug-keys setting changed (0x02 only) */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4548
/* Handle the Set Privacy mgmt command. Only allowed while powered off.
 * Enabling privacy stores the supplied IRK and forces RPA regeneration;
 * disabling clears both. Either way, RPA resolving support is marked as
 * available since user space demonstrably supports IRKs.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force a fresh RPA to be generated on next power on */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4598
4599 static bool irk_is_valid(struct mgmt_irk_info *irk)
4600 {
4601 switch (irk->addr.type) {
4602 case BDADDR_LE_PUBLIC:
4603 return true;
4604
4605 case BDADDR_LE_RANDOM:
4606 /* Two most significant bits shall be set */
4607 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4608 return false;
4609 return true;
4610 }
4611
4612 return false;
4613 }
4614
/* Handle the Load IRKs mgmt command: replace the kernel's IRK store
 * with the list supplied by user space. The whole list is validated
 * first so the existing store is only cleared for a fully valid
 * request. Also marks RPA resolving as supported.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4681
4682 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4683 {
4684 if (key->master != 0x00 && key->master != 0x01)
4685 return false;
4686
4687 switch (key->addr.type) {
4688 case BDADDR_LE_PUBLIC:
4689 return true;
4690
4691 case BDADDR_LE_RANDOM:
4692 /* Two most significant bits shall be set */
4693 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4694 return false;
4695 return true;
4696 }
4697
4698 return false;
4699 }
4700
/* Handle the Load Long Term Keys mgmt command: replace the kernel's
 * SMP LTK store with the list supplied by user space. The whole list
 * is validated before the existing store is cleared; entries with an
 * unknown authentication type are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Master vs. slave role determines the LTK type */
		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Unknown key types are skipped, not rejected */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4784
/* Context passed by conn_info_refresh_complete() when iterating
 * pending Get Connection Information commands via
 * get_conn_info_complete().
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the HCI request targeted */
	bool valid_tx_power;	/* TX power values in conn are usable */
	u8 mgmt_status;		/* status to report to user space */
};
4790
/* Per-pending-command callback used from conn_info_refresh_complete().
 *
 * Completes a pending Get Connection Information command whose target
 * connection matches the one in @data, filling in RSSI and TX power
 * from the connection (or HCI_TX_POWER_INVALID when the values could
 * not be read), then drops the connection reference taken when the
 * command was queued.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only handle commands for the connection being looked up */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Release the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4826
/* HCI request completion handler for the RSSI / TX power refresh
 * issued by get_conn_info(). Determines which connection the request
 * was for and answers the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4884
4885 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4886 u16 len)
4887 {
4888 struct mgmt_cp_get_conn_info *cp = data;
4889 struct mgmt_rp_get_conn_info rp;
4890 struct hci_conn *conn;
4891 unsigned long conn_info_age;
4892 int err = 0;
4893
4894 BT_DBG("%s", hdev->name);
4895
4896 memset(&rp, 0, sizeof(rp));
4897 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4898 rp.addr.type = cp->addr.type;
4899
4900 if (!bdaddr_type_is_valid(cp->addr.type))
4901 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4902 MGMT_STATUS_INVALID_PARAMS,
4903 &rp, sizeof(rp));
4904
4905 hci_dev_lock(hdev);
4906
4907 if (!hdev_is_powered(hdev)) {
4908 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4909 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4910 goto unlock;
4911 }
4912
4913 if (cp->addr.type == BDADDR_BREDR)
4914 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4915 &cp->addr.bdaddr);
4916 else
4917 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4918
4919 if (!conn || conn->state != BT_CONNECTED) {
4920 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4921 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4922 goto unlock;
4923 }
4924
4925 /* To avoid client trying to guess when to poll again for information we
4926 * calculate conn info age as random value between min/max set in hdev.
4927 */
4928 conn_info_age = hdev->conn_info_min_age +
4929 prandom_u32_max(hdev->conn_info_max_age -
4930 hdev->conn_info_min_age);
4931
4932 /* Query controller to refresh cached values if they are too old or were
4933 * never read.
4934 */
4935 if (time_after(jiffies, conn->conn_info_timestamp +
4936 msecs_to_jiffies(conn_info_age)) ||
4937 !conn->conn_info_timestamp) {
4938 struct hci_request req;
4939 struct hci_cp_read_tx_power req_txp_cp;
4940 struct hci_cp_read_rssi req_rssi_cp;
4941 struct pending_cmd *cmd;
4942
4943 hci_req_init(&req, hdev);
4944 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4945 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4946 &req_rssi_cp);
4947
4948 /* For LE links TX power does not change thus we don't need to
4949 * query for it once value is known.
4950 */
4951 if (!bdaddr_type_is_le(cp->addr.type) ||
4952 conn->tx_power == HCI_TX_POWER_INVALID) {
4953 req_txp_cp.handle = cpu_to_le16(conn->handle);
4954 req_txp_cp.type = 0x00;
4955 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4956 sizeof(req_txp_cp), &req_txp_cp);
4957 }
4958
4959 /* Max TX power needs to be read only once per connection */
4960 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4961 req_txp_cp.handle = cpu_to_le16(conn->handle);
4962 req_txp_cp.type = 0x01;
4963 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4964 sizeof(req_txp_cp), &req_txp_cp);
4965 }
4966
4967 err = hci_req_run(&req, conn_info_refresh_complete);
4968 if (err < 0)
4969 goto unlock;
4970
4971 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4972 data, len);
4973 if (!cmd) {
4974 err = -ENOMEM;
4975 goto unlock;
4976 }
4977
4978 hci_conn_hold(conn);
4979 cmd->user_data = conn;
4980
4981 conn->conn_info_timestamp = jiffies;
4982 } else {
4983 /* Cache is valid, just reply with values cached in hci_conn */
4984 rp.rssi = conn->rssi;
4985 rp.tx_power = conn->tx_power;
4986 rp.max_tx_power = conn->max_tx_power;
4987
4988 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4989 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4990 }
4991
4992 unlock:
4993 hci_dev_unlock(hdev);
4994 return err;
4995 }
4996
/* HCI request completion handler for get_clock_info(). Looks up the
 * optional piconet connection via the last sent Read Clock command and
 * replies to the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; otherwise only the local clock was read.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was stored with conn as user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and just the status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5049
5050 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5051 u16 len)
5052 {
5053 struct mgmt_cp_get_clock_info *cp = data;
5054 struct mgmt_rp_get_clock_info rp;
5055 struct hci_cp_read_clock hci_cp;
5056 struct pending_cmd *cmd;
5057 struct hci_request req;
5058 struct hci_conn *conn;
5059 int err;
5060
5061 BT_DBG("%s", hdev->name);
5062
5063 memset(&rp, 0, sizeof(rp));
5064 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5065 rp.addr.type = cp->addr.type;
5066
5067 if (cp->addr.type != BDADDR_BREDR)
5068 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5069 MGMT_STATUS_INVALID_PARAMS,
5070 &rp, sizeof(rp));
5071
5072 hci_dev_lock(hdev);
5073
5074 if (!hdev_is_powered(hdev)) {
5075 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5076 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5077 goto unlock;
5078 }
5079
5080 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5081 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5082 &cp->addr.bdaddr);
5083 if (!conn || conn->state != BT_CONNECTED) {
5084 err = cmd_complete(sk, hdev->id,
5085 MGMT_OP_GET_CLOCK_INFO,
5086 MGMT_STATUS_NOT_CONNECTED,
5087 &rp, sizeof(rp));
5088 goto unlock;
5089 }
5090 } else {
5091 conn = NULL;
5092 }
5093
5094 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5095 if (!cmd) {
5096 err = -ENOMEM;
5097 goto unlock;
5098 }
5099
5100 hci_req_init(&req, hdev);
5101
5102 memset(&hci_cp, 0, sizeof(hci_cp));
5103 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5104
5105 if (conn) {
5106 hci_conn_hold(conn);
5107 cmd->user_data = conn;
5108
5109 hci_cp.handle = cpu_to_le16(conn->handle);
5110 hci_cp.which = 0x01; /* Piconet clock */
5111 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5112 }
5113
5114 err = hci_req_run(&req, get_clock_info_complete);
5115 if (err < 0)
5116 mgmt_pending_remove(cmd);
5117
5118 unlock:
5119 hci_dev_unlock(hdev);
5120 return err;
5121 }
5122
5123 static void device_added(struct sock *sk, struct hci_dev *hdev,
5124 bdaddr_t *bdaddr, u8 type, u8 action)
5125 {
5126 struct mgmt_ev_device_added ev;
5127
5128 bacpy(&ev.addr.bdaddr, bdaddr);
5129 ev.addr.type = type;
5130 ev.action = action;
5131
5132 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5133 }
5134
5135 static int add_device(struct sock *sk, struct hci_dev *hdev,
5136 void *data, u16 len)
5137 {
5138 struct mgmt_cp_add_device *cp = data;
5139 u8 auto_conn, addr_type;
5140 int err;
5141
5142 BT_DBG("%s", hdev->name);
5143
5144 if (!bdaddr_type_is_le(cp->addr.type) ||
5145 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5146 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5147 MGMT_STATUS_INVALID_PARAMS,
5148 &cp->addr, sizeof(cp->addr));
5149
5150 if (cp->action != 0x00 && cp->action != 0x01)
5151 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5152 MGMT_STATUS_INVALID_PARAMS,
5153 &cp->addr, sizeof(cp->addr));
5154
5155 hci_dev_lock(hdev);
5156
5157 if (cp->addr.type == BDADDR_LE_PUBLIC)
5158 addr_type = ADDR_LE_DEV_PUBLIC;
5159 else
5160 addr_type = ADDR_LE_DEV_RANDOM;
5161
5162 if (cp->action)
5163 auto_conn = HCI_AUTO_CONN_ALWAYS;
5164 else
5165 auto_conn = HCI_AUTO_CONN_REPORT;
5166
5167 /* If the connection parameters don't exist for this device,
5168 * they will be created and configured with defaults.
5169 */
5170 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5171 auto_conn) < 0) {
5172 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5173 MGMT_STATUS_FAILED,
5174 &cp->addr, sizeof(cp->addr));
5175 goto unlock;
5176 }
5177
5178 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5179
5180 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5181 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5182
5183 unlock:
5184 hci_dev_unlock(hdev);
5185 return err;
5186 }
5187
5188 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5189 bdaddr_t *bdaddr, u8 type)
5190 {
5191 struct mgmt_ev_device_removed ev;
5192
5193 bacpy(&ev.addr.bdaddr, bdaddr);
5194 ev.addr.type = type;
5195
5196 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5197 }
5198
/* Handle the MGMT_OP_REMOVE_DEVICE command: drop the auto-connect
 * entry for one LE device, or - for BDADDR_ANY - clear all entries
 * that were enabled via Add Device.
 *
 * Returns 0 or a positive value on success and a negative errno on
 * failure.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not added via Add Device must not be removable
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from both the pending-action and the params lists
		 * before freeing, then let background scanning adapt.
		 */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear_enabled(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5265
/* Handle the MGMT_OP_LOAD_CONN_PARAM command: replace all stored LE
 * connection parameters that are in the disabled state with the list
 * supplied by userspace. Invalid entries are skipped, not rejected.
 *
 * Returns 0 or a positive value on success and a negative errno on
 * failure.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX so the
	 * length comparison cannot be fooled by overflow.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the declared entry count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop old disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5350
/* Command dispatch table, indexed by mgmt opcode - the entry order
 * must match the MGMT_OP_* numbering. For var_len commands data_len
 * is a minimum size; for fixed commands the payload must be exactly
 * data_len bytes (both checked in mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
};
5414
/* Entry point for mgmt commands coming in over an HCI control socket.
 * Copies the message from userspace, validates the header, index and
 * payload length, and dispatches to the matching mgmt_handlers entry.
 *
 * Returns the number of bytes consumed on success and a negative errno
 * on failure.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or bound to a user channel
		 * are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept Read Config Info */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not carry a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Enforce exact size for fixed commands, minimum for var_len */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5521
5522 void mgmt_index_added(struct hci_dev *hdev)
5523 {
5524 if (hdev->dev_type != HCI_BREDR)
5525 return;
5526
5527 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5528 return;
5529
5530 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5531 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5532 else
5533 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5534 }
5535
5536 void mgmt_index_removed(struct hci_dev *hdev)
5537 {
5538 u8 status = MGMT_STATUS_INVALID_INDEX;
5539
5540 if (hdev->dev_type != HCI_BREDR)
5541 return;
5542
5543 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5544 return;
5545
5546 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5547
5548 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5549 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5550 else
5551 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5552 }
5553
5554 /* This function requires the caller holds hdev->lock */
5555 static void restart_le_actions(struct hci_dev *hdev)
5556 {
5557 struct hci_conn_params *p;
5558
5559 list_for_each_entry(p, &hdev->le_conn_params, list) {
5560 /* Needed for AUTO_OFF case where might not "really"
5561 * have been powered off.
5562 */
5563 list_del_init(&p->action);
5564
5565 switch (p->auto_connect) {
5566 case HCI_AUTO_CONN_ALWAYS:
5567 list_add(&p->action, &hdev->pend_le_conns);
5568 break;
5569 case HCI_AUTO_CONN_REPORT:
5570 list_add(&p->action, &hdev->pend_le_reports);
5571 break;
5572 default:
5573 break;
5574 }
5575 }
5576
5577 hci_update_background_scan(hdev);
5578 }
5579
5580 static void powered_complete(struct hci_dev *hdev, u8 status)
5581 {
5582 struct cmd_lookup match = { NULL, hdev };
5583
5584 BT_DBG("status 0x%02x", status);
5585
5586 hci_dev_lock(hdev);
5587
5588 restart_le_actions(hdev);
5589
5590 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5591
5592 new_settings(hdev, match.sk);
5593
5594 hci_dev_unlock(hdev);
5595
5596 if (match.sk)
5597 sock_put(match.sk);
5598 }
5599
/* Build and run the HCI request that brings a freshly powered
 * controller in sync with the current mgmt settings (SSP, LE host
 * support, advertising, link security, scan mode, class, name, EIR).
 *
 * Returns the hci_req_run() result; powered_complete() is invoked when
 * the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication setting with mgmt state */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5659
/* Notify the mgmt layer that a controller was powered on or off.
 * Answers pending Set Powered commands, fails all other pending
 * commands on power-off, and emits New Settings / class events.
 *
 * Returns 0 or the new_settings() result.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If an init request was queued, powered_complete() will
		 * send the responses and events once it finishes.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: answer Set Powered, fail everything else */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5694
5695 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5696 {
5697 struct pending_cmd *cmd;
5698 u8 status;
5699
5700 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5701 if (!cmd)
5702 return;
5703
5704 if (err == -ERFKILL)
5705 status = MGMT_STATUS_RFKILLED;
5706 else
5707 status = MGMT_STATUS_FAILED;
5708
5709 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5710
5711 mgmt_pending_remove(cmd);
5712 }
5713
/* Called when the discoverable timeout fires: clear discoverable
 * state, restore page-scan-only mode on BR/EDR and refresh class and
 * advertising data accordingly.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan, drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Class of device and advertising data encode discoverability */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5744
/* Sync the HCI_DISCOVERABLE flag with a scan-mode change reported by
 * the controller and emit New Settings when the state actually
 * changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot outlive discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5781
5782 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5783 {
5784 bool changed;
5785
5786 /* Nothing needed here if there's a pending command since that
5787 * commands request completion callback takes care of everything
5788 * necessary.
5789 */
5790 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5791 return;
5792
5793 /* Powering off may clear the scan mode - don't let that interfere */
5794 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5795 return;
5796
5797 if (connectable)
5798 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5799 else
5800 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5801
5802 if (changed)
5803 new_settings(hdev, NULL);
5804 }
5805
5806 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5807 {
5808 /* Powering off may stop advertising - don't let that interfere */
5809 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5810 return;
5811
5812 if (advertising)
5813 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5814 else
5815 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5816 }
5817
/* A Write Scan Enable command failed: report the error to whichever
 * pending commands (Set Connectable and/or Set Discoverable) asked for
 * the affected scan modes.
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
5830
5831 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5832 bool persistent)
5833 {
5834 struct mgmt_ev_new_link_key ev;
5835
5836 memset(&ev, 0, sizeof(ev));
5837
5838 ev.store_hint = persistent;
5839 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5840 ev.key.addr.type = BDADDR_BREDR;
5841 ev.key.type = key->type;
5842 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5843 ev.key.pin_len = key->pin_len;
5844
5845 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5846 }
5847
5848 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5849 {
5850 if (ltk->authenticated)
5851 return MGMT_LTK_AUTHENTICATED;
5852
5853 return MGMT_LTK_UNAUTHENTICATED;
5854 }
5855
/* Emit a New Long Term Key event for a freshly distributed LE LTK,
 * advising userspace whether it is worth storing.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the master (initiator) role key is flagged as such */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5893
/* Emit a New IRK event, advertising the identity address behind an
 * RPA and hinting whether the key is worth storing.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5923
/* Emit a New CSRK (signature resolving key) event to user space. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5953
5954 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5955 u8 bdaddr_type, u8 store_hint, u16 min_interval,
5956 u16 max_interval, u16 latency, u16 timeout)
5957 {
5958 struct mgmt_ev_new_conn_param ev;
5959
5960 if (!hci_is_identity_address(bdaddr, bdaddr_type))
5961 return;
5962
5963 memset(&ev, 0, sizeof(ev));
5964 bacpy(&ev.addr.bdaddr, bdaddr);
5965 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5966 ev.store_hint = store_hint;
5967 ev.min_interval = cpu_to_le16(min_interval);
5968 ev.max_interval = cpu_to_le16(max_interval);
5969 ev.latency = cpu_to_le16(latency);
5970 ev.timeout = cpu_to_le16(timeout);
5971
5972 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
5973 }
5974
5975 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5976 u8 data_len)
5977 {
5978 eir[eir_len++] = sizeof(type) + data_len;
5979 eir[eir_len++] = type;
5980 memcpy(&eir[eir_len], data, data_len);
5981 eir_len += data_len;
5982
5983 return eir_len;
5984 }
5985
5986 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5987 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5988 u8 *dev_class)
5989 {
5990 char buf[512];
5991 struct mgmt_ev_device_connected *ev = (void *) buf;
5992 u16 eir_len = 0;
5993
5994 bacpy(&ev->addr.bdaddr, bdaddr);
5995 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5996
5997 ev->flags = __cpu_to_le32(flags);
5998
5999 if (name_len > 0)
6000 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6001 name, name_len);
6002
6003 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6004 eir_len = eir_append_data(ev->eir, eir_len,
6005 EIR_CLASS_OF_DEV, dev_class, 3);
6006
6007 ev->eir_len = cpu_to_le16(eir_len);
6008
6009 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6010 sizeof(*ev) + eir_len, NULL);
6011 }
6012
6013 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6014 {
6015 struct mgmt_cp_disconnect *cp = cmd->param;
6016 struct sock **sk = data;
6017 struct mgmt_rp_disconnect rp;
6018
6019 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6020 rp.addr.type = cp->addr.type;
6021
6022 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6023 sizeof(rp));
6024
6025 *sk = cmd->sk;
6026 sock_hold(*sk);
6027
6028 mgmt_pending_remove(cmd);
6029 }
6030
6031 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6032 {
6033 struct hci_dev *hdev = data;
6034 struct mgmt_cp_unpair_device *cp = cmd->param;
6035 struct mgmt_rp_unpair_device rp;
6036
6037 memset(&rp, 0, sizeof(rp));
6038 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6039 rp.addr.type = cp->addr.type;
6040
6041 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6042
6043 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6044
6045 mgmt_pending_remove(cmd);
6046 }
6047
/* Handle an HCI disconnection: answer pending Disconnect/Unpair
 * commands and broadcast the Device Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	/* If a power-off is pending, finish it once the last connection
	 * is gone.
	 */
	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Skip the event for connections that were never reported */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores a held reference to the initiator's
	 * socket in sk; NOTE(review): presumably mgmt_event excludes
	 * that socket from the broadcast — confirm in mgmt_event().
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6089
6090 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6091 u8 link_type, u8 addr_type, u8 status)
6092 {
6093 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6094 struct mgmt_cp_disconnect *cp;
6095 struct mgmt_rp_disconnect rp;
6096 struct pending_cmd *cmd;
6097
6098 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6099 hdev);
6100
6101 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6102 if (!cmd)
6103 return;
6104
6105 cp = cmd->param;
6106
6107 if (bacmp(bdaddr, &cp->addr.bdaddr))
6108 return;
6109
6110 if (cp->addr.type != bdaddr_type)
6111 return;
6112
6113 bacpy(&rp.addr.bdaddr, bdaddr);
6114 rp.addr.type = bdaddr_type;
6115
6116 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6117 mgmt_status(status), &rp, sizeof(rp));
6118
6119 mgmt_pending_remove(cmd);
6120 }
6121
/* An outgoing connection attempt failed: emit the Connect Failed event
 * and, if powering off, finish the power-off once no connections remain.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6147
6148 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6149 {
6150 struct mgmt_ev_pin_code_request ev;
6151
6152 bacpy(&ev.addr.bdaddr, bdaddr);
6153 ev.addr.type = BDADDR_BREDR;
6154 ev.secure = secure;
6155
6156 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6157 }
6158
6159 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6160 u8 status)
6161 {
6162 struct pending_cmd *cmd;
6163 struct mgmt_rp_pin_code_reply rp;
6164
6165 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6166 if (!cmd)
6167 return;
6168
6169 bacpy(&rp.addr.bdaddr, bdaddr);
6170 rp.addr.type = BDADDR_BREDR;
6171
6172 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6173 mgmt_status(status), &rp, sizeof(rp));
6174
6175 mgmt_pending_remove(cmd);
6176 }
6177
6178 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6179 u8 status)
6180 {
6181 struct pending_cmd *cmd;
6182 struct mgmt_rp_pin_code_reply rp;
6183
6184 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6185 if (!cmd)
6186 return;
6187
6188 bacpy(&rp.addr.bdaddr, bdaddr);
6189 rp.addr.type = BDADDR_BREDR;
6190
6191 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6192 mgmt_status(status), &rp, sizeof(rp));
6193
6194 mgmt_pending_remove(cmd);
6195 }
6196
6197 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6198 u8 link_type, u8 addr_type, u32 value,
6199 u8 confirm_hint)
6200 {
6201 struct mgmt_ev_user_confirm_request ev;
6202
6203 BT_DBG("%s", hdev->name);
6204
6205 bacpy(&ev.addr.bdaddr, bdaddr);
6206 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6207 ev.confirm_hint = confirm_hint;
6208 ev.value = cpu_to_le32(value);
6209
6210 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6211 NULL);
6212 }
6213
6214 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6215 u8 link_type, u8 addr_type)
6216 {
6217 struct mgmt_ev_user_passkey_request ev;
6218
6219 BT_DBG("%s", hdev->name);
6220
6221 bacpy(&ev.addr.bdaddr, bdaddr);
6222 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6223
6224 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6225 NULL);
6226 }
6227
6228 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6229 u8 link_type, u8 addr_type, u8 status,
6230 u8 opcode)
6231 {
6232 struct pending_cmd *cmd;
6233 struct mgmt_rp_user_confirm_reply rp;
6234 int err;
6235
6236 cmd = mgmt_pending_find(opcode, hdev);
6237 if (!cmd)
6238 return -ENOENT;
6239
6240 bacpy(&rp.addr.bdaddr, bdaddr);
6241 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6242 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6243 &rp, sizeof(rp));
6244
6245 mgmt_pending_remove(cmd);
6246
6247 return err;
6248 }
6249
6250 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6251 u8 link_type, u8 addr_type, u8 status)
6252 {
6253 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6254 status, MGMT_OP_USER_CONFIRM_REPLY);
6255 }
6256
6257 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6258 u8 link_type, u8 addr_type, u8 status)
6259 {
6260 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6261 status,
6262 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6263 }
6264
6265 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6266 u8 link_type, u8 addr_type, u8 status)
6267 {
6268 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6269 status, MGMT_OP_USER_PASSKEY_REPLY);
6270 }
6271
6272 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6273 u8 link_type, u8 addr_type, u8 status)
6274 {
6275 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6276 status,
6277 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6278 }
6279
6280 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6281 u8 link_type, u8 addr_type, u32 passkey,
6282 u8 entered)
6283 {
6284 struct mgmt_ev_passkey_notify ev;
6285
6286 BT_DBG("%s", hdev->name);
6287
6288 bacpy(&ev.addr.bdaddr, bdaddr);
6289 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6290 ev.passkey = __cpu_to_le32(passkey);
6291 ev.entered = entered;
6292
6293 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6294 }
6295
6296 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6297 u8 addr_type, u8 status)
6298 {
6299 struct mgmt_ev_auth_failed ev;
6300
6301 bacpy(&ev.addr.bdaddr, bdaddr);
6302 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6303 ev.status = mgmt_status(status);
6304
6305 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6306 }
6307
/* Write Authentication Enable HCI command completed: sync the
 * HCI_LINK_SECURITY flag with the controller state and answer any
 * pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag;
	 * changed is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6336
6337 static void clear_eir(struct hci_request *req)
6338 {
6339 struct hci_dev *hdev = req->hdev;
6340 struct hci_cp_write_eir cp;
6341
6342 if (!lmp_ext_inq_capable(hdev))
6343 return;
6344
6345 memset(hdev->eir, 0, sizeof(hdev->eir));
6346
6347 memset(&cp, 0, sizeof(cp));
6348
6349 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6350 }
6351
/* Write Simple Pairing Mode HCI command completed: sync the
 * HCI_SSP_ENABLED / HCI_HS_ENABLED flags, answer pending Set SSP
 * commands and refresh (or clear) the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll the flags back and tell user
		 * space the settings reverted.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also takes High Speed down; report a
		 * settings change if either flag actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6404
/* Secure Connections enable/disable HCI command completed: sync the
 * HCI_SC_ENABLED / HCI_SC_ONLY flags and answer pending Set Secure
 * Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: undo the flag and notify user space */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot survive SC being disabled */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6441
6442 static void sk_lookup(struct pending_cmd *cmd, void *data)
6443 {
6444 struct cmd_lookup *match = data;
6445
6446 if (match->sk == NULL) {
6447 match->sk = cmd->sk;
6448 sock_hold(match->sk);
6449 }
6450 }
6451
6452 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6453 u8 status)
6454 {
6455 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6456
6457 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6458 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6459 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6460
6461 if (!status)
6462 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6463 NULL);
6464
6465 if (match.sk)
6466 sock_put(match.sk);
6467 }
6468
/* Local name write completed: update the cached name if needed and
 * broadcast a Local Name Changed event (except during power-on).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the name change came from
		 * the HCI side; keep the cached copy in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiator's socket (if any) in the broadcast */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6495
/* Read Local OOB Data HCI command completed: answer the pending mgmt
 * command with either the extended (P-192 + P-256, when Secure
 * Connections is enabled) or the legacy (P-192 only) response.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6542
/* Report a discovered device to user space, remapping RPAs to their
 * identity address when an IRK is known and bundling EIR, class of
 * device and scan-response data into one Device Found event.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Prefer the identity address if the RPA resolves to a known IRK */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR doesn't carry one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data is appended after the (possibly extended) EIR */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6598
6599 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6600 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6601 {
6602 struct mgmt_ev_device_found *ev;
6603 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6604 u16 eir_len;
6605
6606 ev = (struct mgmt_ev_device_found *) buf;
6607
6608 memset(buf, 0, sizeof(buf));
6609
6610 bacpy(&ev->addr.bdaddr, bdaddr);
6611 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6612 ev->rssi = rssi;
6613
6614 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6615 name_len);
6616
6617 ev->eir_len = cpu_to_le16(eir_len);
6618
6619 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6620 }
6621
6622 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6623 {
6624 struct mgmt_ev_discovering ev;
6625 struct pending_cmd *cmd;
6626
6627 BT_DBG("%s discovering %u", hdev->name, discovering);
6628
6629 if (discovering)
6630 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6631 else
6632 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6633
6634 if (cmd != NULL) {
6635 u8 type = hdev->discovery.type;
6636
6637 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6638 sizeof(type));
6639 mgmt_pending_remove(cmd);
6640 }
6641
6642 memset(&ev, 0, sizeof(ev));
6643 ev.type = hdev->discovery.type;
6644 ev.discovering = discovering;
6645
6646 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6647 }
6648
6649 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6650 {
6651 BT_DBG("%s status %u", hdev->name, status);
6652
6653 /* Clear the advertising mgmt setting if we failed to re-enable it */
6654 if (status) {
6655 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6656 new_settings(hdev, NULL);
6657 }
6658 }
6659
6660 void mgmt_reenable_advertising(struct hci_dev *hdev)
6661 {
6662 struct hci_request req;
6663
6664 if (hci_conn_num(hdev, LE_LINK) > 0)
6665 return;
6666
6667 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6668 return;
6669
6670 hci_req_init(&req, hdev);
6671 enable_advertising(&req);
6672
6673 /* If this fails we have no option but to let user space know
6674 * that we've disabled advertising.
6675 */
6676 if (hci_req_run(&req, adv_enable_complete) < 0) {
6677 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6678 new_settings(hdev, NULL);
6679 }
6680 }
This page took 0.317525 seconds and 4 git commands to generate.