Bluetooth: Update New CSRK event to match latest specification
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 8
41
/* Opcodes of every mgmt command this interface implements; reported
 * verbatim to user space by the Read Management Commands handler
 * (read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
100
/* Opcodes of every mgmt event this interface can emit; reported to
 * user space alongside mgmt_commands by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
132
/* 2 second timeout, in jiffies (used as a cache lifetime elsewhere in
 * this file — NOTE(review): exact consumer not visible in this chunk).
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* An all-zero 128-bit key, used to recognize unset/blacklisted keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* A mgmt command that has been received but not yet completed */
struct pending_cmd {
	struct list_head list;		/* entry in hdev->mgmt_pending */
	u16 opcode;			/* mgmt opcode of the command */
	int index;			/* controller index (hdev->id) */
	void *param;			/* private copy of the parameters */
	size_t param_len;		/* length of @param in bytes */
	struct sock *sk;		/* originating socket, refcounted */
	void *user_data;		/* handler-specific context pointer */
	int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
148
/* HCI to MGMT error code conversion table; indexed directly by the
 * raw HCI status code (see mgmt_status()), so entry order must match
 * the HCI specification's status code numbering exactly.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
213
214 static u8 mgmt_status(u8 hci_status)
215 {
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
218
219 return MGMT_STATUS_FAILED;
220 }
221
/* Broadcast a mgmt event to every HCI control channel socket except
 * @skip_sk. A NULL @hdev makes the event carry MGMT_INDEX_NONE as the
 * controller index. @data may be NULL when @data_len is 0.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Fill in the mgmt header: opcode, controller index and payload
	 * length, all in little-endian wire format.
	 */
	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_channel() clones the skb per receiver, so the
	 * original must still be freed here.
	 */
	hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
251
/* Queue a Command Status event for command @cmd with @status on the
 * originating socket @sk. Returns 0 on success or a negative errno
 * (-ENOMEM on allocation failure, or the sock_queue_rcv_skb() error).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On success the receiving socket owns the skb; only free it if
	 * queueing failed.
	 */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
281
/* Queue a Command Complete event for command @cmd on socket @sk, with
 * an optional return-parameter blob @rp of @rp_len bytes (@rp may be
 * NULL when @rp_len is 0). Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* The return parameters follow the fixed event header in the
	 * same skb data area.
	 */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On success the receiving socket owns the skb */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
315
316 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
317 u16 data_len)
318 {
319 struct mgmt_rp_read_version rp;
320
321 BT_DBG("sock %p", sk);
322
323 rp.version = MGMT_VERSION;
324 rp.revision = cpu_to_le16(MGMT_REVISION);
325
326 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
327 sizeof(rp));
328 }
329
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
331 u16 data_len)
332 {
333 struct mgmt_rp_read_commands *rp;
334 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
335 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 __le16 *opcode;
337 size_t rp_size;
338 int i, err;
339
340 BT_DBG("sock %p", sk);
341
342 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
343
344 rp = kmalloc(rp_size, GFP_KERNEL);
345 if (!rp)
346 return -ENOMEM;
347
348 rp->num_commands = cpu_to_le16(num_commands);
349 rp->num_events = cpu_to_le16(num_events);
350
351 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
352 put_unaligned_le16(mgmt_commands[i], opcode);
353
354 for (i = 0; i < num_events; i++, opcode++)
355 put_unaligned_le16(mgmt_events[i], opcode);
356
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
358 rp_size);
359 kfree(rp);
360
361 return err;
362 }
363
/* Handler for the Read Controller Index List command: reply with the
 * IDs of all configured BR/EDR controllers. Controllers still in
 * setup/config, user-channel controllers and raw-only devices are
 * excluded from the list.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound count for the allocation size. Note
	 * this applies fewer filters than the fill pass below, so the
	 * buffer may be slightly over-sized; rp_len is recomputed from
	 * the final count before sending.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that should be visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
423
/* Handler for the Read Unconfigured Controller Index List command:
 * like read_index_list() above, but reports controllers that still
 * have the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation; the fill pass
	 * below applies additional filters and rp_len is recomputed.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the unconfigured controller indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
483
484 static bool is_configured(struct hci_dev *hdev)
485 {
486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
487 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
488 return false;
489
490 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
491 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 return false;
493
494 return true;
495 }
496
497 static __le32 get_missing_options(struct hci_dev *hdev)
498 {
499 u32 options = 0;
500
501 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
502 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
503 options |= MGMT_OPTION_EXTERNAL_CONFIG;
504
505 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
506 !bacmp(&hdev->public_addr, BDADDR_ANY))
507 options |= MGMT_OPTION_PUBLIC_ADDRESS;
508
509 return cpu_to_le32(options);
510 }
511
/* Broadcast a New Configuration Options event carrying the currently
 * missing options, skipping the socket @skip that triggered it.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}
519
/* Send a Command Complete for @opcode whose return parameters are the
 * currently missing configuration options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
527
528 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
529 void *data, u16 data_len)
530 {
531 struct mgmt_rp_read_config_info rp;
532 u32 options = 0;
533
534 BT_DBG("sock %p %s", sk, hdev->name);
535
536 hci_dev_lock(hdev);
537
538 memset(&rp, 0, sizeof(rp));
539 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
540
541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
542 options |= MGMT_OPTION_EXTERNAL_CONFIG;
543
544 if (hdev->set_bdaddr)
545 options |= MGMT_OPTION_PUBLIC_ADDRESS;
546
547 rp.supported_options = cpu_to_le32(options);
548 rp.missing_options = get_missing_options(hdev);
549
550 hci_dev_unlock(hdev);
551
552 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
553 sizeof(rp));
554 }
555
556 static u32 get_supported_settings(struct hci_dev *hdev)
557 {
558 u32 settings = 0;
559
560 settings |= MGMT_SETTING_POWERED;
561 settings |= MGMT_SETTING_BONDABLE;
562 settings |= MGMT_SETTING_DEBUG_KEYS;
563 settings |= MGMT_SETTING_CONNECTABLE;
564 settings |= MGMT_SETTING_DISCOVERABLE;
565
566 if (lmp_bredr_capable(hdev)) {
567 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
568 settings |= MGMT_SETTING_FAST_CONNECTABLE;
569 settings |= MGMT_SETTING_BREDR;
570 settings |= MGMT_SETTING_LINK_SECURITY;
571
572 if (lmp_ssp_capable(hdev)) {
573 settings |= MGMT_SETTING_SSP;
574 settings |= MGMT_SETTING_HS;
575 }
576
577 if (lmp_sc_capable(hdev))
578 settings |= MGMT_SETTING_SECURE_CONN;
579 }
580
581 if (lmp_le_capable(hdev)) {
582 settings |= MGMT_SETTING_LE;
583 settings |= MGMT_SETTING_ADVERTISING;
584 settings |= MGMT_SETTING_SECURE_CONN;
585 settings |= MGMT_SETTING_PRIVACY;
586 }
587
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
589 hdev->set_bdaddr)
590 settings |= MGMT_SETTING_CONFIGURATION;
591
592 return settings;
593 }
594
/* Compute the MGMT_SETTING_* bits that are currently active, derived
 * from the power state and the per-device flag bits one-to-one.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
643
/* PnP Information service class UUID, excluded from EIR UUID lists */
#define PNP_INFO_SVCLASS_ID 0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, with @len bytes available. UUIDs below the service class
 * range (< 0x1100) and PnP Information are skipped. If not everything
 * fits, the field type is downgraded to EIR_UUID16_SOME. Returns a
 * pointer just past the bytes written.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least field header (2) plus one 16-bit UUID (2) */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match;
		 * uuids_start[0] is the running length byte.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
687
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, with @len bytes available. Works like create_uuid16_list();
 * downgrades to EIR_UUID32_SOME on overflow. Returns a pointer just
 * past the bytes written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least field header (2) plus one 32-bit UUID (4) */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
720
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, with @len bytes available. Works like create_uuid16_list();
 * downgrades to EIR_UUID128_SOME on overflow. Returns a pointer just
 * past the bytes written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least field header (2) plus one 128-bit UUID (16) */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
753
/* Find the first pending mgmt command with the given opcode on @hdev,
 * or NULL if none is pending. Caller must hold the appropriate lock
 * protecting hdev->mgmt_pending.
 */
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
765
766 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
767 struct hci_dev *hdev,
768 const void *data)
769 {
770 struct pending_cmd *cmd;
771
772 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
773 if (cmd->user_data != data)
774 continue;
775 if (cmd->opcode == opcode)
776 return cmd;
777 }
778
779 return NULL;
780 }
781
/* Build LE scan response data into @ptr containing the local device
 * name, shortened (EIR_NAME_SHORT) when it does not fit within
 * HCI_MAX_AD_LENGTH. Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field's length and type */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
807
/* Queue an HCI LE Set Scan Response Data command onto @req, unless LE
 * is disabled or the freshly built data matches what the controller
 * already has. Also caches the new data in hdev.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the controller data is already current */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
832
833 static u8 get_adv_discov_flags(struct hci_dev *hdev)
834 {
835 struct pending_cmd *cmd;
836
837 /* If there's a pending mgmt command the flags will not yet have
838 * their final values, so check for this first.
839 */
840 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
841 if (cmd) {
842 struct mgmt_mode *cp = cmd->param;
843 if (cp->val == 0x01)
844 return LE_AD_GENERAL;
845 else if (cp->val == 0x02)
846 return LE_AD_LIMITED;
847 } else {
848 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
849 return LE_AD_LIMITED;
850 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
851 return LE_AD_GENERAL;
852 }
853
854 return 0;
855 }
856
/* Build LE advertising data into @ptr: a Flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when valid, a TX
 * Power field. Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The Flags field is omitted entirely when all bits are zero */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
888
/* Queue an HCI LE Set Advertising Data command onto @req, unless LE is
 * disabled or the freshly built data matches what the controller
 * already has. Also caches the new data in hdev.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do if the controller data is already current */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
913
/* Build and immediately run an HCI request updating the advertising
 * data. Returns the hci_req_run() result (0 or a negative errno).
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
923
/* Build extended inquiry response data into @data: local name
 * (truncated to 48 bytes with EIR_NAME_SHORT if longer), inquiry TX
 * power, Device ID and the 16/32/128-bit service UUID lists. @data
 * must be HCI_MAX_EIR_LENGTH bytes.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* A non-zero source means a Device ID record has been set */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains in the buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
971
/* Queue an HCI Write Extended Inquiry Response command onto @req,
 * unless the device is off, lacks EIR support, has SSP disabled, is
 * still service-caching, or the data is already current.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	/* EIR is only valid while Simple Pairing is enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; the cache-off work
	 * will trigger the update later.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
1000
1001 static u8 get_service_classes(struct hci_dev *hdev)
1002 {
1003 struct bt_uuid *uuid;
1004 u8 val = 0;
1005
1006 list_for_each_entry(uuid, &hdev->uuids, list)
1007 val |= uuid->svc_hint;
1008
1009 return val;
1010 }
1011
/* Queue an HCI Write Class of Device command onto @req, built from the
 * minor/major class and the service-class hints. Skipped while the
 * device is off, BR/EDR is disabled, the service cache is active, or
 * the class is unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the major class */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1040
1041 static bool get_connectable(struct hci_dev *hdev)
1042 {
1043 struct pending_cmd *cmd;
1044
1045 /* If there's a pending mgmt command the flag will not yet have
1046 * it's final value, so check for this first.
1047 */
1048 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1049 if (cmd) {
1050 struct mgmt_mode *cp = cmd->param;
1051 return cp->val;
1052 }
1053
1054 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1055 }
1056
/* Queue an HCI command turning LE advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1063
/* Queue HCI commands (re)enabling LE advertising: disable first if
 * currently active, update the random address if needed, set the
 * advertising parameters and finally enable. No-op while LE
 * connections exist, since advertising cannot be restarted then.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1104
/* Delayed-work handler that ends the service cache period: clears
 * HCI_SERVICE_CACHE and pushes the (previously deferred) EIR and class
 * of device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache period already ended */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1125
/* Delayed-work handler run when the resolvable private address (RPA)
 * lifetime expires: marks the RPA stale and, if advertising is on,
 * restarts it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1146
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt command touches the device: sets HCI_MGMT, wires up the
 * delayed work items and clears the implicit bondable default.
 * Idempotent thanks to test_and_set_bit().
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1162
/* Handler for the Read Controller Information command: reply with the
 * address, HCI version, manufacturer, supported and current settings,
 * class of device and the device names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1192
1193 static void mgmt_pending_free(struct pending_cmd *cmd)
1194 {
1195 sock_put(cmd->sk);
1196 kfree(cmd->param);
1197 kfree(cmd);
1198 }
1199
1200 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1201 struct hci_dev *hdev, void *data,
1202 u16 len)
1203 {
1204 struct pending_cmd *cmd;
1205
1206 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1207 if (!cmd)
1208 return NULL;
1209
1210 cmd->opcode = opcode;
1211 cmd->index = hdev->id;
1212
1213 cmd->param = kmemdup(data, len, GFP_KERNEL);
1214 if (!cmd->param) {
1215 kfree(cmd);
1216 return NULL;
1217 }
1218
1219 cmd->param_len = len;
1220
1221 cmd->sk = sk;
1222 sock_hold(sk);
1223
1224 list_add(&cmd->list, &hdev->mgmt_pending);
1225
1226 return cmd;
1227 }
1228
1229 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1230 void (*cb)(struct pending_cmd *cmd,
1231 void *data),
1232 void *data)
1233 {
1234 struct pending_cmd *cmd, *tmp;
1235
1236 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1237 if (opcode > 0 && cmd->opcode != opcode)
1238 continue;
1239
1240 cb(cmd, data);
1241 }
1242 }
1243
/* Unlink a pending command from its controller's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1249
/* Send a command complete response for @opcode carrying the current
 * settings bitmask (little-endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1257
/* Completion callback for the power-off cleanup request: if no
 * connections remain, run the power-off work immediately instead of
 * waiting for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1267
/* Queue the HCI commands needed to stop whatever discovery activity
 * is currently in progress. Returns true if at least one command was
 * queued (i.e. there was something to stop).
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the pending remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1309
/* Build and run an HCI request that quiesces the controller before
 * power off: disable page/inquiry scanning and advertising, stop any
 * discovery, and disconnect, cancel or reject every connection.
 * Returns the hci_req_run() result (-ENODATA when no commands were
 * queued at all).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: request a disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting acceptance: reject */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1368
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off. */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If an auto-power-off was scheduled, cancel it; a power-on
	 * request can then complete directly since the device is
	 * already up.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already current: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1434
/* Broadcast a New Settings event with the current settings bitmask,
 * optionally skipping the socket that triggered the change.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
1443
/* Exported wrapper: broadcast a New Settings event to all mgmt
 * sockets without skipping any of them.
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1448
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp().
 */
struct cmd_lookup {
	struct sock *sk;	/* first matched socket (reference held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1454
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. The first socket seen is stashed
 * (with a reference) in the lookup context so the caller can use it,
 * e.g. to skip it when broadcasting events.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1470
/* mgmt_pending_foreach() callback: fail a pending command with the
 * given status and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1478
1479 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1480 {
1481 if (cmd->cmd_complete) {
1482 u8 *status = data;
1483
1484 cmd->cmd_complete(cmd, *status);
1485 mgmt_pending_remove(cmd);
1486
1487 return;
1488 }
1489
1490 cmd_status_rsp(cmd, data);
1491 }
1492
/* Default cmd_complete handler: echo the original command parameters
 * back in the response.
 */
static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}
1498
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info; only that address portion is echoed back.
 */
static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}
1504
1505 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1506 {
1507 if (!lmp_bredr_capable(hdev))
1508 return MGMT_STATUS_NOT_SUPPORTED;
1509 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1510 return MGMT_STATUS_REJECTED;
1511 else
1512 return MGMT_STATUS_SUCCESS;
1513 }
1514
1515 static u8 mgmt_le_support(struct hci_dev *hdev)
1516 {
1517 if (!lmp_le_capable(hdev))
1518 return MGMT_STATUS_NOT_SUPPORTED;
1519 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1520 return MGMT_STATUS_REJECTED;
1521 else
1522 return MGMT_STATUS_SUCCESS;
1523 }
1524
/* HCI request completion handler for MGMT_OP_SET_DISCOVERABLE: update
 * the HCI_DISCOVERABLE flag, arm the discoverable timeout, respond to
 * the pending command and broadcast New Settings on change.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout that was stored by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1584
/* Handler for MGMT_OP_SET_DISCOVERABLE: val 0x00 disables,
 * 0x01 enables general and 0x02 enables limited discoverable mode,
 * with an optional timeout (required for limited mode).
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1749
/* Queue HCI commands switching between fast (interlaced, 160 msec
 * interval) and standard (1.28 sec interval) page scanning. Commands
 * are only added when the parameters would actually change, and only
 * for BR/EDR capable controllers of version 1.2 or later.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require HCI 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1784
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: update
 * the HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags,
 * respond to the pending command and propagate the change to page
 * scan, advertising data and background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1834
1835 static int set_connectable_update_settings(struct hci_dev *hdev,
1836 struct sock *sk, u8 val)
1837 {
1838 bool changed = false;
1839 int err;
1840
1841 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1842 changed = true;
1843
1844 if (val) {
1845 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1846 } else {
1847 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1848 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1849 }
1850
1851 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 if (err < 0)
1853 return err;
1854
1855 if (changed) {
1856 hci_update_page_scan(hdev);
1857 hci_update_background_scan(hdev);
1858 return new_settings(hdev, sk);
1859 }
1860
1861 return 0;
1862 }
1863
/* Handler for MGMT_OP_SET_CONNECTABLE: enable or disable page
 * scanning (BR/EDR) and/or connectable advertising (LE).
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When powered off only the stored settings need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: no commands queued, fall back to a pure
		 * settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1968
1969 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1970 u16 len)
1971 {
1972 struct mgmt_mode *cp = data;
1973 bool changed;
1974 int err;
1975
1976 BT_DBG("request for %s", hdev->name);
1977
1978 if (cp->val != 0x00 && cp->val != 0x01)
1979 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1980 MGMT_STATUS_INVALID_PARAMS);
1981
1982 hci_dev_lock(hdev);
1983
1984 if (cp->val)
1985 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1986 else
1987 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1988
1989 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 if (err < 0)
1991 goto unlock;
1992
1993 if (changed)
1994 err = new_settings(hdev, sk);
1995
1996 unlock:
1997 hci_dev_unlock(hdev);
1998 return err;
1999 }
2000
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link level
 * security by sending HCI Write Authentication Enable when powered,
 * or by flipping the stored flag when powered off.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag can be updated */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI
	 * command needed, just confirm the settings.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2070
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple
 * Pairing. Disabling SSP also disables High Speed support.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip stored flags, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP implies disabling HS; report a
			 * change if either flag was set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode when in use */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2151
/* Handler for MGMT_OP_SET_HS: toggle High Speed support. Requires
 * SSP to be enabled; the flag is purely host-side so no HCI commands
 * are sent.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SSP change could invalidate the SSP check above */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2208
/* HCI request completion handler for MGMT_OP_SET_LE: respond to all
 * pending SET_LE commands and, on success, broadcast New Settings and
 * refresh LE advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2248
/* Handler for MGMT_OP_SET_LE: enable or disable LE support via HCI
 * Write LE Host Supported. Only valid on dual-mode controllers; for
 * LE-only devices the setting cannot be toggled.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or when the host LE state already matches,
	 * only the stored flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also drops the advertising setting */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before disabling LE */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2337
2338 /* This is a helper function to test for pending mgmt commands that can
2339 * cause CoD or EIR HCI commands. We can only allow one such pending
2340 * mgmt command at a time since otherwise we cannot easily track what
2341 * the current values are, will be, and based on that calculate if a new
2342 * HCI command needs to be sent and if yes with what value.
2343 */
2344 static bool pending_eir_or_class(struct hci_dev *hdev)
2345 {
2346 struct pending_cmd *cmd;
2347
2348 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2349 switch (cmd->opcode) {
2350 case MGMT_OP_ADD_UUID:
2351 case MGMT_OP_REMOVE_UUID:
2352 case MGMT_OP_SET_DEV_CLASS:
2353 case MGMT_OP_SET_POWERED:
2354 return true;
2355 }
2356 }
2357
2358 return false;
2359 }
2360
2361 static const u8 bluetooth_base_uuid[] = {
2362 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2363 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2364 };
2365
2366 static u8 get_uuid_size(const u8 *uuid)
2367 {
2368 u32 val;
2369
2370 if (memcmp(uuid, bluetooth_base_uuid, 12))
2371 return 128;
2372
2373 val = get_unaligned_le32(&uuid[12]);
2374 if (val > 0xffff)
2375 return 32;
2376
2377 return 16;
2378 }
2379
2380 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2381 {
2382 struct pending_cmd *cmd;
2383
2384 hci_dev_lock(hdev);
2385
2386 cmd = mgmt_pending_find(mgmt_op, hdev);
2387 if (!cmd)
2388 goto unlock;
2389
2390 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2391 hdev->dev_class, 3);
2392
2393 mgmt_pending_remove(cmd);
2394
2395 unlock:
2396 hci_dev_unlock(hdev);
2397 }
2398
/* HCI request callback for Add UUID: report the result (along with the
 * current class of device) back to the originating mgmt command.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2405
/* Handler for the Add UUID mgmt command: register a new service UUID,
 * refresh the Class of Device and EIR data, and reply with the
 * (possibly updated) device class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were needed (e.g.
		 * powered off or nothing changed), so complete right
		 * away with the current device class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred until add_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2463
2464 static bool enable_service_cache(struct hci_dev *hdev)
2465 {
2466 if (!hdev_is_powered(hdev))
2467 return false;
2468
2469 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2470 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2471 CACHE_TIMEOUT);
2472 return true;
2473 }
2474
2475 return false;
2476 }
2477
/* HCI request callback for Remove UUID: report the result (along with
 * the current class of device) back to the originating mgmt command.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2484
/* Handler for the Remove UUID mgmt command. An all-zeroes UUID clears
 * the entire list; otherwise every entry matching the given UUID is
 * removed. Class of Device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zeroes UUID means "remove everything" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was (re)armed the actual
		 * class/EIR update is deferred to the delayed work.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; respond right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2562
/* HCI request callback for Set Device Class: report the result (along
 * with the current class of device) back to the originating command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2569
/* Handler for the Set Device Class mgmt command: validate and store the
 * new major/minor class and, when powered, push it (plus a possible EIR
 * refresh) to the controller. BR/EDR only.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and the three highest major bits
	 * are reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just remember the values; they are written
	 * to the controller when it gets powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* cancel_delayed_work_sync() may sleep, so the device lock has
	 * to be dropped around it and reacquired afterwards.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; respond right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2640
/* Handler for the Load Link Keys mgmt command: replace all stored
 * BR/EDR link keys with the supplied list and update the "keep debug
 * keys" policy. All entries are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which expected_len cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2722
2723 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2724 u8 addr_type, struct sock *skip_sk)
2725 {
2726 struct mgmt_ev_device_unpaired ev;
2727
2728 bacpy(&ev.addr.bdaddr, bdaddr);
2729 ev.addr.type = addr_type;
2730
2731 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2732 skip_sk);
2733 }
2734
/* Handler for the Unpair Device mgmt command: remove all stored keys
 * (link key for BR/EDR; IRK and LTK for LE) for the given address and
 * optionally terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map mgmt address type to HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* Key removal failing means there was nothing paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2847
/* Handler for the Disconnect mgmt command: terminate an existing ACL
 * or LE connection to the given address. The reply is deferred until
 * the disconnect actually completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2910
2911 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2912 {
2913 switch (link_type) {
2914 case LE_LINK:
2915 switch (addr_type) {
2916 case ADDR_LE_DEV_PUBLIC:
2917 return BDADDR_LE_PUBLIC;
2918
2919 default:
2920 /* Fallback to LE Random address type */
2921 return BDADDR_LE_RANDOM;
2922 }
2923
2924 default:
2925 /* Fallback to BR/EDR type */
2926 return BDADDR_BREDR;
2927 }
2928 }
2929
2930 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2931 u16 data_len)
2932 {
2933 struct mgmt_rp_get_connections *rp;
2934 struct hci_conn *c;
2935 size_t rp_len;
2936 int err;
2937 u16 i;
2938
2939 BT_DBG("");
2940
2941 hci_dev_lock(hdev);
2942
2943 if (!hdev_is_powered(hdev)) {
2944 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2945 MGMT_STATUS_NOT_POWERED);
2946 goto unlock;
2947 }
2948
2949 i = 0;
2950 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2951 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2952 i++;
2953 }
2954
2955 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2956 rp = kmalloc(rp_len, GFP_KERNEL);
2957 if (!rp) {
2958 err = -ENOMEM;
2959 goto unlock;
2960 }
2961
2962 i = 0;
2963 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2964 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2965 continue;
2966 bacpy(&rp->addr[i].bdaddr, &c->dst);
2967 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2968 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2969 continue;
2970 i++;
2971 }
2972
2973 rp->conn_count = cpu_to_le16(i);
2974
2975 /* Recalculate length in case of filtered SCO connections, etc */
2976 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2977
2978 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2979 rp_len);
2980
2981 kfree(rp);
2982
2983 unlock:
2984 hci_dev_unlock(hdev);
2985 return err;
2986 }
2987
2988 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2989 struct mgmt_cp_pin_code_neg_reply *cp)
2990 {
2991 struct pending_cmd *cmd;
2992 int err;
2993
2994 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2995 sizeof(*cp));
2996 if (!cmd)
2997 return -ENOMEM;
2998
2999 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3000 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3001 if (err < 0)
3002 mgmt_pending_remove(cmd);
3003
3004 return err;
3005 }
3006
/* Handler for the PIN Code Reply mgmt command: forward the user
 * supplied PIN to the controller. If high security is required but the
 * PIN is not 16 bytes, a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; reject anything
	 * shorter with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Response is deferred until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3068
3069 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3070 u16 len)
3071 {
3072 struct mgmt_cp_set_io_capability *cp = data;
3073
3074 BT_DBG("");
3075
3076 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3077 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3078 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3079
3080 hci_dev_lock(hdev);
3081
3082 hdev->io_capability = cp->io_capability;
3083
3084 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3085 hdev->io_capability);
3086
3087 hci_dev_unlock(hdev);
3088
3089 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3090 0);
3091 }
3092
3093 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3094 {
3095 struct hci_dev *hdev = conn->hdev;
3096 struct pending_cmd *cmd;
3097
3098 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3099 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3100 continue;
3101
3102 if (cmd->user_data != conn)
3103 continue;
3104
3105 return cmd;
3106 }
3107
3108 return NULL;
3109 }
3110
/* Complete a pending Pair Device command with the given status, detach
 * all pairing callbacks from the connection and release the references
 * taken when the pairing was started.
 */
static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
			   &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3139
3140 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3141 {
3142 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3143 struct pending_cmd *cmd;
3144
3145 cmd = find_pairing(conn);
3146 if (cmd) {
3147 cmd->cmd_complete(cmd, status);
3148 mgmt_pending_remove(cmd);
3149 }
3150 }
3151
3152 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3153 {
3154 struct pending_cmd *cmd;
3155
3156 BT_DBG("status %u", status);
3157
3158 cmd = find_pairing(conn);
3159 if (!cmd) {
3160 BT_DBG("Unable to find a pending command");
3161 return;
3162 }
3163
3164 cmd->cmd_complete(cmd, mgmt_status(status));
3165 mgmt_pending_remove(cmd);
3166 }
3167
3168 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3169 {
3170 struct pending_cmd *cmd;
3171
3172 BT_DBG("status %u", status);
3173
3174 if (!status)
3175 return;
3176
3177 cmd = find_pairing(conn);
3178 if (!cmd) {
3179 BT_DBG("Unable to find a pending command");
3180 return;
3181 }
3182
3183 cmd->cmd_complete(cmd, mgmt_status(status));
3184 mgmt_pending_remove(cmd);
3185 }
3186
/* Handler for the Pair Device mgmt command: establish a connection to
 * the given address (ACL or LE) and initiate pairing on it. The reply
 * is deferred until the pairing procedure completes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connect errors onto mgmt status codes */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A set connect callback means another pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3313
/* Handler for the Cancel Pair Device mgmt command: abort the currently
 * pending Pair Device command for the given address and complete it
 * with a Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must be for the requested address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3356
/* Common helper for the user confirmation/passkey (negative) reply
 * commands. For LE the response is handed to SMP directly; for BR/EDR
 * the corresponding HCI command is sent and the mgmt reply deferred
 * until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3426
/* Handler for the PIN Code Negative Reply mgmt command */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3438
/* Handler for the User Confirmation Reply mgmt command */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* The command carries no variable-length fields, so the length
	 * must match the structure exactly.
	 */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3454
/* Handler for the User Confirmation Negative Reply mgmt command */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3466
/* Handler for the User Passkey Reply mgmt command */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3478
/* Handler for the User Passkey Negative Reply mgmt command */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3490
/* Queue an HCI Write Local Name command carrying hdev->dev_name */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3500
3501 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3502 {
3503 struct mgmt_cp_set_local_name *cp;
3504 struct pending_cmd *cmd;
3505
3506 BT_DBG("status 0x%02x", status);
3507
3508 hci_dev_lock(hdev);
3509
3510 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3511 if (!cmd)
3512 goto unlock;
3513
3514 cp = cmd->param;
3515
3516 if (status)
3517 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3518 mgmt_status(status));
3519 else
3520 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3521 cp, sizeof(*cp));
3522
3523 mgmt_pending_remove(cmd);
3524
3525 unlock:
3526 hci_dev_unlock(hdev);
3527 }
3528
/* Handler for the Set Local Name mgmt command: store the new complete
 * and short names and, when powered, push the complete name to the
 * controller (name, EIR and LE scan response data).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off just remember the name and notify other
	 * mgmt sockets; it is written to the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Response is deferred until set_name_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3597
/* Handler for the Read Local OOB Data management command.
 *
 * Requests the local out-of-band pairing data from the controller.
 * When BR/EDR secure connections are enabled the extended variant
 * (P-192 + P-256 values) is read, otherwise only the legacy data.
 * The management reply is sent from the matching HCI event handler.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is a Secure Simple Pairing feature. */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be outstanding. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3645
/* Handler for the Add Remote OOB Data management command.
 *
 * Two command sizes are accepted: the legacy size carrying only the
 * P-192 hash/randomizer (BR/EDR only), and the extended size carrying
 * both P-192 and P-256 values. Zero-valued key pairs in the extended
 * form disable OOB data for that curve. Any other length is rejected
 * as invalid parameters.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS, addr,
				    sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is limited to BR/EDR. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_ADD_REMOTE_OOB_DATA,
						   MGMT_STATUS_INVALID_PARAMS,
						   addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3749
3750 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3751 void *data, u16 len)
3752 {
3753 struct mgmt_cp_remove_remote_oob_data *cp = data;
3754 u8 status;
3755 int err;
3756
3757 BT_DBG("%s", hdev->name);
3758
3759 if (cp->addr.type != BDADDR_BREDR)
3760 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3761 MGMT_STATUS_INVALID_PARAMS,
3762 &cp->addr, sizeof(cp->addr));
3763
3764 hci_dev_lock(hdev);
3765
3766 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3767 hci_remote_oob_data_clear(hdev);
3768 status = MGMT_STATUS_SUCCESS;
3769 goto done;
3770 }
3771
3772 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3773 if (err < 0)
3774 status = MGMT_STATUS_INVALID_PARAMS;
3775 else
3776 status = MGMT_STATUS_SUCCESS;
3777
3778 done:
3779 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3780 status, &cp->addr, sizeof(cp->addr));
3781
3782 hci_dev_unlock(hdev);
3783 return err;
3784 }
3785
/* Queue the HCI commands needed to start discovery of the currently
 * configured type (hdev->discovery.type) onto @req.
 *
 * Returns true when the commands were queued. Returns false and sets
 * *@status to a MGMT_STATUS_* code when discovery cannot start (type
 * unsupported, controller busy, or address update failure).
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* Reject if an inquiry is already in progress. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery additionally requires BR/EDR
		 * to be enabled.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3882
/* Request-complete callback shared by Start Discovery and Start
 * Service Discovery. Resolves whichever of the two commands is
 * pending, updates the discovery state, and for LE-based discovery
 * schedules the delayed work that stops scanning after the timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may be pending. */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own. */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    (hdev->discovery.uuid_count > 0 ||
		     hdev->discovery.rssi != HCI_RSSI_INVALID)) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
3949
/* Handler for the Start Discovery management command.
 *
 * Validates the device state, resets any previous discovery filter,
 * and queues the HCI commands via trigger_discovery(). On success the
 * discovery state moves to STARTING and the reply is deferred to
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already active or a periodic inquiry
	 * is in progress.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4015
4016 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4017 {
4018 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4019 cmd->param, 1);
4020 }
4021
/* Handler for the Start Service Discovery management command.
 *
 * Like start_discovery() but additionally carries an RSSI threshold
 * and a variable-length list of 128-bit service UUIDs used to filter
 * the results. The UUID count is bounded (max_uuid_count) so the
 * expected-length computation below cannot overflow u16.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4129
/* Request-complete callback for stop_discovery(). Resolves the
 * pending MGMT_OP_STOP_DISCOVERY command and, on success, marks
 * discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4149
/* Handler for the Stop Discovery management command.
 *
 * The requested type must match the type of the active discovery. If
 * hci_stop_discovery() queues HCI commands the reply is deferred to
 * stop_discovery_complete(); if no commands were needed (-ENODATA)
 * the command is completed and discovery stopped immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4207
4208 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4209 u16 len)
4210 {
4211 struct mgmt_cp_confirm_name *cp = data;
4212 struct inquiry_entry *e;
4213 int err;
4214
4215 BT_DBG("%s", hdev->name);
4216
4217 hci_dev_lock(hdev);
4218
4219 if (!hci_discovery_active(hdev)) {
4220 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4221 MGMT_STATUS_FAILED, &cp->addr,
4222 sizeof(cp->addr));
4223 goto failed;
4224 }
4225
4226 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4227 if (!e) {
4228 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4229 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4230 sizeof(cp->addr));
4231 goto failed;
4232 }
4233
4234 if (cp->name_known) {
4235 e->name_state = NAME_KNOWN;
4236 list_del(&e->list);
4237 } else {
4238 e->name_state = NAME_NEEDED;
4239 hci_inquiry_cache_update_resolve(hdev, e);
4240 }
4241
4242 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4243 sizeof(cp->addr));
4244
4245 failed:
4246 hci_dev_unlock(hdev);
4247 return err;
4248 }
4249
4250 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4251 u16 len)
4252 {
4253 struct mgmt_cp_block_device *cp = data;
4254 u8 status;
4255 int err;
4256
4257 BT_DBG("%s", hdev->name);
4258
4259 if (!bdaddr_type_is_valid(cp->addr.type))
4260 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4261 MGMT_STATUS_INVALID_PARAMS,
4262 &cp->addr, sizeof(cp->addr));
4263
4264 hci_dev_lock(hdev);
4265
4266 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4267 cp->addr.type);
4268 if (err < 0) {
4269 status = MGMT_STATUS_FAILED;
4270 goto done;
4271 }
4272
4273 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4274 sk);
4275 status = MGMT_STATUS_SUCCESS;
4276
4277 done:
4278 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4279 &cp->addr, sizeof(cp->addr));
4280
4281 hci_dev_unlock(hdev);
4282
4283 return err;
4284 }
4285
4286 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4287 u16 len)
4288 {
4289 struct mgmt_cp_unblock_device *cp = data;
4290 u8 status;
4291 int err;
4292
4293 BT_DBG("%s", hdev->name);
4294
4295 if (!bdaddr_type_is_valid(cp->addr.type))
4296 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4297 MGMT_STATUS_INVALID_PARAMS,
4298 &cp->addr, sizeof(cp->addr));
4299
4300 hci_dev_lock(hdev);
4301
4302 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4303 cp->addr.type);
4304 if (err < 0) {
4305 status = MGMT_STATUS_INVALID_PARAMS;
4306 goto done;
4307 }
4308
4309 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4310 sk);
4311 status = MGMT_STATUS_SUCCESS;
4312
4313 done:
4314 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4315 &cp->addr, sizeof(cp->addr));
4316
4317 hci_dev_unlock(hdev);
4318
4319 return err;
4320 }
4321
4322 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4323 u16 len)
4324 {
4325 struct mgmt_cp_set_device_id *cp = data;
4326 struct hci_request req;
4327 int err;
4328 __u16 source;
4329
4330 BT_DBG("%s", hdev->name);
4331
4332 source = __le16_to_cpu(cp->source);
4333
4334 if (source > 0x0002)
4335 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4336 MGMT_STATUS_INVALID_PARAMS);
4337
4338 hci_dev_lock(hdev);
4339
4340 hdev->devid_source = source;
4341 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4342 hdev->devid_product = __le16_to_cpu(cp->product);
4343 hdev->devid_version = __le16_to_cpu(cp->version);
4344
4345 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4346
4347 hci_req_init(&req, hdev);
4348 update_eir(&req);
4349 hci_req_run(&req, NULL);
4350
4351 hci_dev_unlock(hdev);
4352
4353 return err;
4354 }
4355
/* Request-complete callback for set_advertising(). Synchronizes the
 * HCI_ADVERTISING setting flag with the actual controller state
 * (HCI_LE_ADV), resolves all pending Set Advertising commands and
 * emits a New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4387
/* Handler for the Set Advertising management command.
 *
 * Toggles LE advertising. In several situations (powered off, no
 * actual change, active LE connections, or an active LE scan) only
 * the setting flag is updated and the reply is sent directly without
 * touching the controller; otherwise the advertising state change is
 * queued as an HCI request completed by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject if an advertising or LE state change is in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4467
4468 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4469 void *data, u16 len)
4470 {
4471 struct mgmt_cp_set_static_address *cp = data;
4472 int err;
4473
4474 BT_DBG("%s", hdev->name);
4475
4476 if (!lmp_le_capable(hdev))
4477 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4478 MGMT_STATUS_NOT_SUPPORTED);
4479
4480 if (hdev_is_powered(hdev))
4481 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4482 MGMT_STATUS_REJECTED);
4483
4484 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4485 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4486 return cmd_status(sk, hdev->id,
4487 MGMT_OP_SET_STATIC_ADDRESS,
4488 MGMT_STATUS_INVALID_PARAMS);
4489
4490 /* Two most significant bits shall be set */
4491 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4492 return cmd_status(sk, hdev->id,
4493 MGMT_OP_SET_STATIC_ADDRESS,
4494 MGMT_STATUS_INVALID_PARAMS);
4495 }
4496
4497 hci_dev_lock(hdev);
4498
4499 bacpy(&hdev->static_addr, &cp->bdaddr);
4500
4501 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4502
4503 hci_dev_unlock(hdev);
4504
4505 return err;
4506 }
4507
4508 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4509 void *data, u16 len)
4510 {
4511 struct mgmt_cp_set_scan_params *cp = data;
4512 __u16 interval, window;
4513 int err;
4514
4515 BT_DBG("%s", hdev->name);
4516
4517 if (!lmp_le_capable(hdev))
4518 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4519 MGMT_STATUS_NOT_SUPPORTED);
4520
4521 interval = __le16_to_cpu(cp->interval);
4522
4523 if (interval < 0x0004 || interval > 0x4000)
4524 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4525 MGMT_STATUS_INVALID_PARAMS);
4526
4527 window = __le16_to_cpu(cp->window);
4528
4529 if (window < 0x0004 || window > 0x4000)
4530 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4531 MGMT_STATUS_INVALID_PARAMS);
4532
4533 if (window > interval)
4534 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4535 MGMT_STATUS_INVALID_PARAMS);
4536
4537 hci_dev_lock(hdev);
4538
4539 hdev->le_scan_interval = interval;
4540 hdev->le_scan_window = window;
4541
4542 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4543
4544 /* If background scan is running, restart it so new parameters are
4545 * loaded.
4546 */
4547 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4548 hdev->discovery.state == DISCOVERY_STOPPED) {
4549 struct hci_request req;
4550
4551 hci_req_init(&req, hdev);
4552
4553 hci_req_add_le_scan_disable(&req);
4554 hci_req_add_le_passive_scan(&req);
4555
4556 hci_req_run(&req, NULL);
4557 }
4558
4559 hci_dev_unlock(hdev);
4560
4561 return err;
4562 }
4563
/* Request-complete callback for set_fast_connectable(). On success
 * the HCI_FAST_CONNECTABLE flag is updated to the requested value
 * and a settings response plus New Settings event are sent.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4597
/* Handler for the Set Fast Connectable management command.
 *
 * Requires BR/EDR to be enabled, controller version 1.2 or later,
 * the device to be powered and connectable. The page scan change is
 * queued as an HCI request completed by fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just reply with the current settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4662
/* Request-complete callback for set_bredr(). On HCI failure the
 * HCI_BREDR_ENABLED flag (flipped optimistically by set_bredr()) is
 * reverted; otherwise a settings response and New Settings event are
 * sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4694
/* Handler for the Set BR/EDR management command (dual-mode
 * controllers only, and only while LE is enabled).
 *
 * While powered off the flag is simply toggled. While powered on,
 * disabling BR/EDR is rejected, and re-enabling is rejected when the
 * controller is configured for LE-only operation with a static
 * address or with secure connections enabled. The actual enable is
 * queued as an HCI request completed by set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4806
/* HCI request completion callback for Set Secure Connections.
 *
 * Translates the controller status into a mgmt reply for the pending
 * command and, on success, updates the HCI_SC_ENABLED/HCI_SC_ONLY
 * flags according to the mode (0x00 off, 0x01 enabled, 0x02 SC-only)
 * originally requested by user space.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, cmd->index, cmd->opcode,
			   mgmt_status(status));
		goto remove;
	}

	/* The requested mode was saved as the pending command parameter */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	case 0x01:
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	case 0x02:
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4851
/* Handler for the Set Secure Connections management command.
 *
 * Mode 0x00 disables SC, 0x01 enables it and 0x02 enables SC-only
 * mode.  When the change can be handled purely in host flags (powered
 * off, no SC-capable BR/EDR link layer, or BR/EDR disabled) it is
 * applied immediately; otherwise HCI Write Secure Connections Host
 * Support is issued and completion is handled by sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* SC on BR/EDR requires SSP to be enabled first */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only update when no HCI command needs to reach the
	 * controller (powered off, not SC capable, or LE-only mode).
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: acknowledge without HCI traffic */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4939
/* Handler for the Set Debug Keys management command.
 *
 * Mode 0x00 discards debug keys, 0x01 keeps them, and 0x02 also makes
 * the controller generate debug keys (HCI_USE_DEBUG_KEYS).  When the
 * generation mode changes while powered with SSP enabled, the Write
 * SSP Debug Mode HCI command is sent to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are retained */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only mode 0x02 enables debug key generation */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4987
/* Handler for the Set Privacy management command.
 *
 * Enables or disables LE privacy (use of resolvable private addresses)
 * and stores or clears the local IRK supplied by user space.  Only
 * permitted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5037
5038 static bool irk_is_valid(struct mgmt_irk_info *irk)
5039 {
5040 switch (irk->addr.type) {
5041 case BDADDR_LE_PUBLIC:
5042 return true;
5043
5044 case BDADDR_LE_RANDOM:
5045 /* Two most significant bits shall be set */
5046 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5047 return false;
5048 return true;
5049 }
5050
5051 return false;
5052 }
5053
/* Handler for the Load IRKs management command.
 *
 * Validates the supplied list of Identity Resolving Keys, replaces the
 * kernel's current IRK store with it and enables RPA resolving.  The
 * request length must exactly match the declared key count.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Reject the whole request if any single entry is invalid */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously loaded IRKs */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5120
5121 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5122 {
5123 if (key->master != 0x00 && key->master != 0x01)
5124 return false;
5125
5126 switch (key->addr.type) {
5127 case BDADDR_LE_PUBLIC:
5128 return true;
5129
5130 case BDADDR_LE_RANDOM:
5131 /* Two most significant bits shall be set */
5132 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5133 return false;
5134 return true;
5135 }
5136
5137 return false;
5138 }
5139
/* Handler for the Load Long Term Keys management command.
 *
 * Validates the supplied LTK list and replaces the kernel's current
 * LTK store with it, mapping the mgmt key type onto the SMP key type
 * and authentication level.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole request if any single entry is invalid */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously loaded LTKs */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here, so control falls
			 * through to the default case and the entry is
			 * skipped -- debug LTKs are therefore never
			 * stored, making the two assignments above dead.
			 * Presumably intentional (debug keys should not
			 * be persisted); confirm before "fixing".
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5231
/* Complete a pending Get Connection Info command.
 *
 * Echoes back the address info saved in cmd->param and, on success,
 * the RSSI and TX power values cached in the hci_conn; on failure the
 * invalid marker values are reported instead.  Drops the connection
 * references taken when the command was queued.
 */
static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param holds the original request, which starts with the
	 * address information that must be echoed in the reply.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			   &rp, sizeof(rp));

	/* Release the hold/get pair taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5258
/* HCI request completion callback for the Get Connection Info refresh.
 *
 * Recovers the connection handle from the last sent Read RSSI or Read
 * TX Power command, locates the matching pending command and completes
 * it with the translated status.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command is keyed on the connection it refers to */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5311
/* Handler for the Get Connection Info management command.
 *
 * Returns RSSI, TX power and max TX power for an established
 * connection.  Cached values are returned while still considered
 * fresh; otherwise Read RSSI / Read Transmit Power Level HCI commands
 * are issued and the reply is deferred to conn_info_cmd_complete()
 * via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight at a time */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the command completes */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5430
5431 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5432 {
5433 struct hci_conn *conn = cmd->user_data;
5434 struct mgmt_rp_get_clock_info rp;
5435 struct hci_dev *hdev;
5436 int err;
5437
5438 memset(&rp, 0, sizeof(rp));
5439 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5440
5441 if (status)
5442 goto complete;
5443
5444 hdev = hci_dev_get(cmd->index);
5445 if (hdev) {
5446 rp.local_clock = cpu_to_le32(hdev->clock);
5447 hci_dev_put(hdev);
5448 }
5449
5450 if (conn) {
5451 rp.piconet_clock = cpu_to_le32(conn->clock);
5452 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5453 }
5454
5455 complete:
5456 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5457 sizeof(rp));
5458
5459 if (conn) {
5460 hci_conn_drop(conn);
5461 hci_conn_put(conn);
5462 }
5463
5464 return err;
5465 }
5466
/* HCI request completion callback for Get Clock Information.
 *
 * Recovers the connection (if the piconet clock was read) from the
 * last sent Read Clock command, finds the matching pending command and
 * completes it with the translated status.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5498
/* Handler for the Get Clock Information management command.
 *
 * Reads the local clock and, when a BR/EDR peer address is supplied
 * and connected, also that connection's piconet clock.  The reply is
 * deferred to clock_info_cmd_complete() via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a specific connection; BDADDR_ANY
	 * requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the command completes */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5573
5574 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5575 {
5576 struct hci_conn *conn;
5577
5578 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5579 if (!conn)
5580 return false;
5581
5582 if (conn->dst_type != type)
5583 return false;
5584
5585 if (conn->state != BT_CONNECTED)
5586 return false;
5587
5588 return true;
5589 }
5590
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for the given address and
 * set its auto-connect policy, moving the entry onto the matching
 * pend_le_reports/pend_le_conns action list and refreshing the
 * background scan as needed.  Returns 0 on success, -EIO if the
 * parameter entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry was on before */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5632
5633 static void device_added(struct sock *sk, struct hci_dev *hdev,
5634 bdaddr_t *bdaddr, u8 type, u8 action)
5635 {
5636 struct mgmt_ev_device_added ev;
5637
5638 bacpy(&ev.addr.bdaddr, bdaddr);
5639 ev.addr.type = type;
5640 ev.action = action;
5641
5642 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5643 }
5644
5645 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5646 {
5647 struct pending_cmd *cmd;
5648
5649 BT_DBG("status 0x%02x", status);
5650
5651 hci_dev_lock(hdev);
5652
5653 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5654 if (!cmd)
5655 goto unlock;
5656
5657 cmd->cmd_complete(cmd, mgmt_status(status));
5658 mgmt_pending_remove(cmd);
5659
5660 unlock:
5661 hci_dev_unlock(hdev);
5662 }
5663
/* Handler for the Add Device management command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections)
 * is supported and the device is added to the whitelist.  For LE
 * addresses the action selects the auto-connect policy: 0x00 report
 * on sighting, 0x01 direct connect, 0x02 always connect.  Emits the
 * Device Added event and completes via add_device_complete().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5756
5757 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5758 bdaddr_t *bdaddr, u8 type)
5759 {
5760 struct mgmt_ev_device_removed ev;
5761
5762 bacpy(&ev.addr.bdaddr, bdaddr);
5763 ev.addr.type = type;
5764
5765 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5766 }
5767
5768 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5769 {
5770 struct pending_cmd *cmd;
5771
5772 BT_DBG("status 0x%02x", status);
5773
5774 hci_dev_lock(hdev);
5775
5776 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5777 if (!cmd)
5778 goto unlock;
5779
5780 cmd->cmd_complete(cmd, mgmt_status(status));
5781 mgmt_pending_remove(cmd);
5782
5783 unlock:
5784 hci_dev_unlock(hdev);
5785 }
5786
/* Handler for the Remove Device management command.
 *
 * With a specific address, removes that device from the BR/EDR
 * whitelist or deletes its LE connection parameters.  With BDADDR_ANY
 * (address type must be 0), flushes the whole whitelist and every
 * non-disabled LE connection parameter entry.  Emits Device Removed
 * events and completes via remove_device_complete().
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Non-zero address: remove one specific device */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so
		 * removing them through this command is invalid.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are kept; they were not added via
		 * Add Device.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5913
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count chosen so that the expected_len
	 * computation below can never overflow u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared parameter count must exactly match the actual
	 * payload size supplied by user space.
	 */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Loading replaces the previously stored (disabled) entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here; bad entries
		 * are skipped rather than failing the whole command.
		 */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5998
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The external configuration option can only be changed while
	 * the controller is powered off.
	 */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	/* Only 0x00 (disabled) and 0x01 (enabled) are valid values */
	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The command only makes sense for controllers that declare the
	 * external configuration quirk.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state flipped, remove the controller from
	 * its current index list and re-announce it on the other one.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			/* Power-on cycle announces the now configured index */
			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6056
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The public address can only be programmed before power on */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback for programming the address this
	 * command cannot be supported.
	 */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* If the new address completed the configuration, move the
	 * controller from the unconfigured to the configured list by
	 * cycling it through a power on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6108
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* if true, data_len is a minimum length */
	size_t data_len;	/* expected (or minimum) parameter length */
} mgmt_handlers[] = {
	/* This table is indexed directly by opcode, so entry order must
	 * exactly match the MGMT_OP_* numbering.
	 */
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
};
6175
/* Entry point for all management commands arriving on a control socket.
 * Validates the header, resolves the target controller and dispatches
 * to the matching mgmt_handlers[] entry.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must cover exactly the remaining
	 * payload bytes.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config phase or bound to a
		 * user channel are not visible on this interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the commands
		 * needed to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Index-less (global) commands must not carry a controller
	 * index ...
	 */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one. */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly; variable-size
	 * commands declare a minimum length.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* On success report the full message as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6285
6286 void mgmt_index_added(struct hci_dev *hdev)
6287 {
6288 if (hdev->dev_type != HCI_BREDR)
6289 return;
6290
6291 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6292 return;
6293
6294 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6295 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6296 else
6297 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6298 }
6299
6300 void mgmt_index_removed(struct hci_dev *hdev)
6301 {
6302 u8 status = MGMT_STATUS_INVALID_INDEX;
6303
6304 if (hdev->dev_type != HCI_BREDR)
6305 return;
6306
6307 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6308 return;
6309
6310 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6311
6312 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6313 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6314 else
6315 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6316 }
6317
6318 /* This function requires the caller holds hdev->lock */
6319 static void restart_le_actions(struct hci_request *req)
6320 {
6321 struct hci_dev *hdev = req->hdev;
6322 struct hci_conn_params *p;
6323
6324 list_for_each_entry(p, &hdev->le_conn_params, list) {
6325 /* Needed for AUTO_OFF case where might not "really"
6326 * have been powered off.
6327 */
6328 list_del_init(&p->action);
6329
6330 switch (p->auto_connect) {
6331 case HCI_AUTO_CONN_DIRECT:
6332 case HCI_AUTO_CONN_ALWAYS:
6333 list_add(&p->action, &hdev->pend_le_conns);
6334 break;
6335 case HCI_AUTO_CONN_REPORT:
6336 list_add(&p->action, &hdev->pend_le_reports);
6337 break;
6338 default:
6339 break;
6340 }
6341 }
6342
6343 __hci_update_background_scan(req);
6344 }
6345
/* Completion handler for the HCI request built by powered_update_hci().
 * Responds to pending Set Powered commands and broadcasts the new
 * settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference collected by settings_rsp, if any
	 * command was pending.
	 */
	if (match.sk)
		sock_put(match.sk);
}
6372
/* Build and run the HCI request that synchronizes controller state with
 * the management settings after power on (SSP, LE host support,
 * advertising data, link security, scan/class/name/EIR).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections, if applicable) when the
	 * setting is on but the controller host feature is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Only touch authentication enable if it differs from the
	 * current controller state.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6441
/* Notify the management interface of a power state change. On power on
 * this triggers the HCI state synchronization; on power off it fails
 * all pending commands and resets the announced class of device.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If HCI commands were queued, powered_complete() will
		 * answer the pending Set Powered commands later.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a zeroed class of device if one was previously set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6488
6489 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6490 {
6491 struct pending_cmd *cmd;
6492 u8 status;
6493
6494 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6495 if (!cmd)
6496 return;
6497
6498 if (err == -ERFKILL)
6499 status = MGMT_STATUS_RFKILLED;
6500 else
6501 status = MGMT_STATUS_FAILED;
6502
6503 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6504
6505 mgmt_pending_remove(cmd);
6506 }
6507
/* Timer callback that ends a time-limited discoverable period: clears
 * the discoverable flags, reverts scan mode/class/advertising data and
 * announces the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6538
6539 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6540 bool persistent)
6541 {
6542 struct mgmt_ev_new_link_key ev;
6543
6544 memset(&ev, 0, sizeof(ev));
6545
6546 ev.store_hint = persistent;
6547 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6548 ev.key.addr.type = BDADDR_BREDR;
6549 ev.key.type = key->type;
6550 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6551 ev.key.pin_len = key->pin_len;
6552
6553 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6554 }
6555
6556 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6557 {
6558 switch (ltk->type) {
6559 case SMP_LTK:
6560 case SMP_LTK_SLAVE:
6561 if (ltk->authenticated)
6562 return MGMT_LTK_AUTHENTICATED;
6563 return MGMT_LTK_UNAUTHENTICATED;
6564 case SMP_LTK_P256:
6565 if (ltk->authenticated)
6566 return MGMT_LTK_P256_AUTH;
6567 return MGMT_LTK_P256_UNAUTH;
6568 case SMP_LTK_P256_DEBUG:
6569 return MGMT_LTK_P256_DEBUG;
6570 }
6571
6572 return MGMT_LTK_UNAUTHENTICATED;
6573 }
6574
/* Send a New Long Term Key event for a key created during SMP pairing. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Flag the key as master key when it was created in the
	 * master role.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6612
6613 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6614 {
6615 struct mgmt_ev_new_irk ev;
6616
6617 memset(&ev, 0, sizeof(ev));
6618
6619 /* For identity resolving keys from devices that are already
6620 * using a public address or static random address, do not
6621 * ask for storing this key. The identity resolving key really
6622 * is only mandatory for devices using resovlable random
6623 * addresses.
6624 *
6625 * Storing all identity resolving keys has the downside that
6626 * they will be also loaded on next boot of they system. More
6627 * identity resolving keys, means more time during scanning is
6628 * needed to actually resolve these addresses.
6629 */
6630 if (bacmp(&irk->rpa, BDADDR_ANY))
6631 ev.store_hint = 0x01;
6632 else
6633 ev.store_hint = 0x00;
6634
6635 bacpy(&ev.rpa, &irk->rpa);
6636 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6637 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6638 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6639
6640 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6641 }
6642
6643 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6644 bool persistent)
6645 {
6646 struct mgmt_ev_new_csrk ev;
6647
6648 memset(&ev, 0, sizeof(ev));
6649
6650 /* Devices using resolvable or non-resolvable random addresses
6651 * without providing an indentity resolving key don't require
6652 * to store signature resolving keys. Their addresses will change
6653 * the next time around.
6654 *
6655 * Only when a remote device provides an identity address
6656 * make sure the signature resolving key is stored. So allow
6657 * static random and public addresses here.
6658 */
6659 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6660 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6661 ev.store_hint = 0x00;
6662 else
6663 ev.store_hint = persistent;
6664
6665 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6666 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6667 ev.key.type = csrk->type;
6668 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6669
6670 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6671 }
6672
6673 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6674 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6675 u16 max_interval, u16 latency, u16 timeout)
6676 {
6677 struct mgmt_ev_new_conn_param ev;
6678
6679 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6680 return;
6681
6682 memset(&ev, 0, sizeof(ev));
6683 bacpy(&ev.addr.bdaddr, bdaddr);
6684 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6685 ev.store_hint = store_hint;
6686 ev.min_interval = cpu_to_le16(min_interval);
6687 ev.max_interval = cpu_to_le16(max_interval);
6688 ev.latency = cpu_to_le16(latency);
6689 ev.timeout = cpu_to_le16(timeout);
6690
6691 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6692 }
6693
6694 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6695 u8 data_len)
6696 {
6697 eir[eir_len++] = sizeof(type) + data_len;
6698 eir[eir_len++] = type;
6699 memcpy(&eir[eir_len], data, data_len);
6700 eir_len += data_len;
6701
6702 return eir_len;
6703 }
6704
/* Send a Device Connected event, attaching either the LE advertising
 * data or the BR/EDR name/class as EIR data.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable EIR data are assembled in one
	 * stack buffer; 512 bytes covers the maximum EIR payload
	 * produced below.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one is set */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6741
6742 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6743 {
6744 struct sock **sk = data;
6745
6746 cmd->cmd_complete(cmd, 0);
6747
6748 *sk = cmd->sk;
6749 sock_hold(*sk);
6750
6751 mgmt_pending_remove(cmd);
6752 }
6753
6754 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6755 {
6756 struct hci_dev *hdev = data;
6757 struct mgmt_cp_unpair_device *cp = cmd->param;
6758
6759 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6760
6761 cmd->cmd_complete(cmd, 0);
6762 mgmt_pending_remove(cmd);
6763 }
6764
6765 bool mgmt_powering_down(struct hci_dev *hdev)
6766 {
6767 struct pending_cmd *cmd;
6768 struct mgmt_mode *cp;
6769
6770 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6771 if (!cmd)
6772 return false;
6773
6774 cp = cmd->param;
6775 if (!cp->val)
6776 return true;
6777
6778 return false;
6779 }
6780
/* Send a Device Disconnected event and complete any pending Disconnect
 * or Unpair Device commands that match.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections announced via Device Connected produce a
	 * disconnect event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp sets sk to the initiating socket so it can be
	 * skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6816
6817 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6818 u8 link_type, u8 addr_type, u8 status)
6819 {
6820 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6821 struct mgmt_cp_disconnect *cp;
6822 struct pending_cmd *cmd;
6823
6824 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6825 hdev);
6826
6827 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6828 if (!cmd)
6829 return;
6830
6831 cp = cmd->param;
6832
6833 if (bacmp(bdaddr, &cp->addr.bdaddr))
6834 return;
6835
6836 if (cp->addr.type != bdaddr_type)
6837 return;
6838
6839 cmd->cmd_complete(cmd, mgmt_status(status));
6840 mgmt_pending_remove(cmd);
6841 }
6842
6843 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6844 u8 addr_type, u8 status)
6845 {
6846 struct mgmt_ev_connect_failed ev;
6847
6848 /* The connection is still in hci_conn_hash so test for 1
6849 * instead of 0 to know if this is the last one.
6850 */
6851 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6852 cancel_delayed_work(&hdev->power_off);
6853 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6854 }
6855
6856 bacpy(&ev.addr.bdaddr, bdaddr);
6857 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6858 ev.status = mgmt_status(status);
6859
6860 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6861 }
6862
6863 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6864 {
6865 struct mgmt_ev_pin_code_request ev;
6866
6867 bacpy(&ev.addr.bdaddr, bdaddr);
6868 ev.addr.type = BDADDR_BREDR;
6869 ev.secure = secure;
6870
6871 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6872 }
6873
6874 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6875 u8 status)
6876 {
6877 struct pending_cmd *cmd;
6878
6879 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6880 if (!cmd)
6881 return;
6882
6883 cmd->cmd_complete(cmd, mgmt_status(status));
6884 mgmt_pending_remove(cmd);
6885 }
6886
6887 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6888 u8 status)
6889 {
6890 struct pending_cmd *cmd;
6891
6892 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6893 if (!cmd)
6894 return;
6895
6896 cmd->cmd_complete(cmd, mgmt_status(status));
6897 mgmt_pending_remove(cmd);
6898 }
6899
6900 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6901 u8 link_type, u8 addr_type, u32 value,
6902 u8 confirm_hint)
6903 {
6904 struct mgmt_ev_user_confirm_request ev;
6905
6906 BT_DBG("%s", hdev->name);
6907
6908 bacpy(&ev.addr.bdaddr, bdaddr);
6909 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6910 ev.confirm_hint = confirm_hint;
6911 ev.value = cpu_to_le32(value);
6912
6913 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6914 NULL);
6915 }
6916
6917 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6918 u8 link_type, u8 addr_type)
6919 {
6920 struct mgmt_ev_user_passkey_request ev;
6921
6922 BT_DBG("%s", hdev->name);
6923
6924 bacpy(&ev.addr.bdaddr, bdaddr);
6925 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6926
6927 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6928 NULL);
6929 }
6930
6931 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6932 u8 link_type, u8 addr_type, u8 status,
6933 u8 opcode)
6934 {
6935 struct pending_cmd *cmd;
6936
6937 cmd = mgmt_pending_find(opcode, hdev);
6938 if (!cmd)
6939 return -ENOENT;
6940
6941 cmd->cmd_complete(cmd, mgmt_status(status));
6942 mgmt_pending_remove(cmd);
6943
6944 return 0;
6945 }
6946
6947 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6948 u8 link_type, u8 addr_type, u8 status)
6949 {
6950 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6951 status, MGMT_OP_USER_CONFIRM_REPLY);
6952 }
6953
6954 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6955 u8 link_type, u8 addr_type, u8 status)
6956 {
6957 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6958 status,
6959 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6960 }
6961
6962 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6963 u8 link_type, u8 addr_type, u8 status)
6964 {
6965 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6966 status, MGMT_OP_USER_PASSKEY_REPLY);
6967 }
6968
6969 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6970 u8 link_type, u8 addr_type, u8 status)
6971 {
6972 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6973 status,
6974 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6975 }
6976
6977 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6978 u8 link_type, u8 addr_type, u32 passkey,
6979 u8 entered)
6980 {
6981 struct mgmt_ev_passkey_notify ev;
6982
6983 BT_DBG("%s", hdev->name);
6984
6985 bacpy(&ev.addr.bdaddr, bdaddr);
6986 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6987 ev.passkey = __cpu_to_le32(passkey);
6988 ev.entered = entered;
6989
6990 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6991 }
6992
6993 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6994 {
6995 struct mgmt_ev_auth_failed ev;
6996 struct pending_cmd *cmd;
6997 u8 status = mgmt_status(hci_status);
6998
6999 bacpy(&ev.addr.bdaddr, &conn->dst);
7000 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7001 ev.status = status;
7002
7003 cmd = find_pairing(conn);
7004
7005 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7006 cmd ? cmd->sk : NULL);
7007
7008 if (cmd) {
7009 cmd->cmd_complete(cmd, status);
7010 mgmt_pending_remove(cmd);
7011 }
7012 }
7013
7014 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7015 {
7016 struct cmd_lookup match = { NULL, hdev };
7017 bool changed;
7018
7019 if (status) {
7020 u8 mgmt_err = mgmt_status(status);
7021 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7022 cmd_status_rsp, &mgmt_err);
7023 return;
7024 }
7025
7026 if (test_bit(HCI_AUTH, &hdev->flags))
7027 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7028 &hdev->dev_flags);
7029 else
7030 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7031 &hdev->dev_flags);
7032
7033 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7034 &match);
7035
7036 if (changed)
7037 new_settings(hdev, match.sk);
7038
7039 if (match.sk)
7040 sock_put(match.sk);
7041 }
7042
7043 static void clear_eir(struct hci_request *req)
7044 {
7045 struct hci_dev *hdev = req->hdev;
7046 struct hci_cp_write_eir cp;
7047
7048 if (!lmp_ext_inq_capable(hdev))
7049 return;
7050
7051 memset(hdev->eir, 0, sizeof(hdev->eir));
7052
7053 memset(&cp, 0, sizeof(cp));
7054
7055 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7056 }
7057
/* Completion handler for enabling/disabling Simple Secure Pairing.
 * Syncs the HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) mgmt
 * settings with the HCI result, answers pending Set SSP commands and
 * refreshes or clears the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed but the flag was set, clear it
		 * again (High Speed requires SSP, so clear that too)
		 * and notify userspace of the reverted settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; report a
		 * settings change if either bit actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Keep EIR in sync with the final SSP state: refresh it (and
	 * optionally toggle SSP debug mode) when SSP is on, clear it
	 * otherwise.
	 */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7110
7111 static void sk_lookup(struct pending_cmd *cmd, void *data)
7112 {
7113 struct cmd_lookup *match = data;
7114
7115 if (match->sk == NULL) {
7116 match->sk = cmd->sk;
7117 sock_hold(match->sk);
7118 }
7119 }
7120
7121 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7122 u8 status)
7123 {
7124 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7125
7126 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7127 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7128 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7129
7130 if (!status)
7131 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
7132 NULL);
7133
7134 if (match.sk)
7135 sock_put(match.sk);
7136 }
7137
/* Completion handler for a local name change. On success, sends a
 * Local Name Changed event carrying both the full and short name.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command: the change did not come
		 * through a Set Local Name command, so cache the new
		 * name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket; it gets the command response. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
7164
/* Completion handler for Read Local OOB Data. Replies to the pending
 * mgmt command with the P-192 hash/randomizer and, when Secure
 * Connections is enabled over BR/EDR and the values are available,
 * also the P-256 pair.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Without the P-256 values, shrink the reply so
			 * the trailing hash256/rand256 fields are not
			 * sent at all.
			 */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
			     &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7200
7201 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7202 {
7203 int i;
7204
7205 for (i = 0; i < uuid_count; i++) {
7206 if (!memcmp(uuid, uuids[i], 16))
7207 return true;
7208 }
7209
7210 return false;
7211 }
7212
/* Walk EIR/advertising data and check whether any advertised service
 * UUID (16-, 32- or 128-bit, complete or incomplete list types)
 * matches an entry in the 128-bit UUID filter list. Shorter UUIDs are
 * expanded against the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field instead of reading past
		 * the end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field type; data starts at eir[2]. */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				/* Overlay the little-endian 16-bit UUID
				 * onto bytes 12-13 of the base UUID.
				 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				/* Overlay the little-endian 32-bit UUID
				 * onto bytes 12-15 of the base UUID.
				 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past length byte + field contents. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7267
/* Schedule the le_scan_restart delayed work so that controllers with
 * strict duplicate filtering produce fresh advertising reports (and
 * thus updated RSSI values).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return;

	/* Don't schedule a restart that would only fire after the
	 * current scan window (scan_start + scan_duration, in jiffies)
	 * has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7282
/* Build and send a Device Found event for a discovery result, after
 * applying the active discovery filters: RSSI threshold, service UUID
 * matching against EIR/advertising data and scan response data.
 * Results that fail any filter are silently dropped.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
	 * then all results with a RSSI smaller than the RSSI threshold will be
	 * dropped. If the quirk is set, let it through for further processing,
	 * as we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0) {
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
					      &hdev->quirks))
				restart_le_scan(hdev);
		} else {
			match = true;
		}

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	/* Append the class of device if the EIR data lacks one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;

			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
				     &hdev->quirks))
				restart_le_scan(hdev);
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	/* Validate the reported RSSI value against the RSSI threshold once more
	 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
	 * scanning.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    rssi < hdev->discovery.rssi)
		return;

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7431
7432 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7433 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7434 {
7435 struct mgmt_ev_device_found *ev;
7436 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7437 u16 eir_len;
7438
7439 ev = (struct mgmt_ev_device_found *) buf;
7440
7441 memset(buf, 0, sizeof(buf));
7442
7443 bacpy(&ev->addr.bdaddr, bdaddr);
7444 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7445 ev->rssi = rssi;
7446
7447 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7448 name_len);
7449
7450 ev->eir_len = cpu_to_le16(eir_len);
7451
7452 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7453 }
7454
7455 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7456 {
7457 struct mgmt_ev_discovering ev;
7458
7459 BT_DBG("%s discovering %u", hdev->name, discovering);
7460
7461 memset(&ev, 0, sizeof(ev));
7462 ev.type = hdev->discovery.type;
7463 ev.discovering = discovering;
7464
7465 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7466 }
7467
/* hci_req_run completion callback for re-enabling advertising; only
 * logs the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7472
/* If the Advertising mgmt setting is still on, build and run an HCI
 * request that turns advertising back on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}