Bluetooth: Pass initiator/acceptor information to hci_conn_security()
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Mgmt opcodes implemented by this kernel. The list is reported verbatim
 * to user space in the Read Management Commands reply (see read_commands()),
 * so ordering here is part of the user-visible ABI only in the sense that
 * entries must all be valid MGMT_OP_* values.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Mgmt events this kernel can emit; reported to user space in the
 * Read Management Commands reply alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
/* 2 second timeout, in jiffies (users are outside this chunk) */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* From mgmt's point of view a controller counts as powered only when it
 * is up AND not merely auto-powered for initialization (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
134
/* A mgmt command that has been accepted but not yet completed.
 *
 * @list:      linkage into hdev->mgmt_pending
 * @opcode:    the pending MGMT_OP_* command
 * @index:     controller id the command targets
 * @param:     kmalloc'd copy of the command parameters (freed with the cmd)
 * @sk:        originating socket; a reference is held for the cmd's lifetime
 * @user_data: opaque per-command context, matched by mgmt_pending_find_data()
 */
struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};
143
/* HCI to MGMT error code conversion table, indexed by the raw HCI status
 * byte. Statuses beyond the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208
209 static u8 mgmt_status(u8 hci_status)
210 {
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
213
214 return MGMT_STATUS_FAILED;
215 }
216
/* Broadcast a mgmt event to every management socket except @skip_sk.
 *
 * @event:    MGMT_EV_* opcode
 * @hdev:     controller the event relates to, or NULL for a global event
 *            (index is then set to MGMT_INDEX_NONE)
 * @data:     optional event payload of @data_len bytes, copied into the skb
 * @skip_sk:  socket to exclude from delivery (typically the originator)
 *
 * Returns 0 on success or -ENOMEM if the skb could not be allocated.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Header first, then the optional payload */
	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
246
/* Queue a Command Status mgmt event on socket @sk, reporting @status for
 * command @cmd on controller @index. Returns 0 on success or a negative
 * errno (the skb is freed locally on queueing failure).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* Deliver only to the originating socket */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
276
/* Queue a Command Complete mgmt event on socket @sk for command @cmd on
 * controller @index, with @status and an optional return-parameter blob
 * @rp of @rp_len bytes. Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* Event header and return parameters share one contiguous put */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
310
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_version rp;
315
316 BT_DBG("sock %p", sk);
317
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
320
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
323 }
324
/* Handler for the Read Management Commands command. Replies with the
 * counts and little-endian opcode lists of all supported commands and
 * events. Returns 0 on success or a negative errno.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	/* Commands and events are packed back-to-back after the header */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcode array in the reply is not guaranteed to be aligned */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
358
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
360 u16 data_len)
361 {
362 struct mgmt_rp_read_index_list *rp;
363 struct hci_dev *d;
364 size_t rp_len;
365 u16 count;
366 int err;
367
368 BT_DBG("sock %p", sk);
369
370 read_lock(&hci_dev_list_lock);
371
372 count = 0;
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
376 count++;
377 }
378
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
381 if (!rp) {
382 read_unlock(&hci_dev_list_lock);
383 return -ENOMEM;
384 }
385
386 count = 0;
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
391 continue;
392
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
395 */
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
397 continue;
398
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
403 }
404 }
405
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
408
409 read_unlock(&hci_dev_list_lock);
410
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
412 rp_len);
413
414 kfree(rp);
415
416 return err;
417 }
418
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
421 {
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
427
428 BT_DBG("sock %p", sk);
429
430 read_lock(&hci_dev_list_lock);
431
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
437 }
438
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
444 }
445
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
452
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
455 */
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
458
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
463 }
464 }
465
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
468
469 read_unlock(&hci_dev_list_lock);
470
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
473
474 kfree(rp);
475
476 return err;
477 }
478
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490 }
491
492 static __le32 get_missing_options(struct hci_dev *hdev)
493 {
494 u32 options = 0;
495
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504 return cpu_to_le32(options);
505 }
506
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513 }
514
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521 }
522
/* Handler for the Read Controller Configuration Information command.
 * Replies with manufacturer id plus the supported and currently missing
 * configuration option bitmasks.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported if the driver
	 * provides a set_bdaddr hook.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
550
/* Build the Supported Settings bitmask for @hdev from its transport
 * capabilities (BR/EDR LMP features, LE support) and configuration hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Supported on every controller regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* Secure Connections can also be force-enabled for testing */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
589
/* Translate the controller's current state flags into the Current
 * Settings bitmask reported to user space.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
638
/* PnP Information service class; excluded from EIR UUID lists */
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes). Reserved UUIDs (< 0x1100) and PnP Info are
 * skipped. If not all UUIDs fit, the field type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME. Returns the new write position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit value sits at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily open the EIR field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
682
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (at most @len bytes); downgrades to EIR_UUID32_SOME when
 * truncated. Returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily open the EIR field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value occupies bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
715
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (at most @len bytes); downgrades to EIR_UUID128_SOME when
 * truncated. Returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily open the EIR field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
748
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750 {
751 struct pending_cmd *cmd;
752
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
756 }
757
758 return NULL;
759 }
760
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764 {
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775 }
776
/* Fill @ptr with the LE scan response payload: the local device name as
 * an EIR field, shortened if it does not fit. Returns the number of
 * bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* ad_len is still 0 here, so two bytes are reserved for
		 * the field's length and type header.
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
802
/* Queue an LE Set Scan Response Data command on @req if the freshly
 * generated payload differs from what the controller already has.
 * No-op unless LE is enabled.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache what will be programmed so the comparison above works
	 * next time around.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
827
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
829 {
830 struct pending_cmd *cmd;
831
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
834 */
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
836 if (cmd) {
837 struct mgmt_mode *cp = cmd->param;
838 if (cp->val == 0x01)
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
842 } else {
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
847 }
848
849 return 0;
850 }
851
/* Fill @ptr with the LE advertising payload: an optional Flags field
 * (discoverability and BR/EDR support) and an optional TX power field.
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the Flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
883
/* Queue an LE Set Advertising Data command on @req if the freshly
 * generated payload differs from what the controller already has.
 * No-op unless LE is enabled.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache what will be programmed so the comparison above works
	 * next time around.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
908
/* Exported helper: refresh the advertising data with a standalone HCI
 * request. Returns the hci_req_run() result (0 or a negative errno).
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
918
/* Build the extended inquiry response payload into @data: local name
 * (shortened to 48 bytes if needed), TX power, Device ID and the three
 * UUID lists. The caller provides a zeroed HCI_MAX_EIR_LENGTH buffer.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version (8 bytes) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
966
/* Queue a Write Extended Inquiry Response command on @req when the newly
 * generated EIR differs from the cached one. No-op unless the controller
 * is powered, supports extended inquiry, has SSP enabled and is not in
 * the service-cache window.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
995
996 static u8 get_service_classes(struct hci_dev *hdev)
997 {
998 struct bt_uuid *uuid;
999 u8 val = 0;
1000
1001 list_for_each_entry(uuid, &hdev->uuids, list)
1002 val |= uuid->svc_hint;
1003
1004 return val;
1005 }
1006
/* Queue a Write Class of Device command on @req when the computed class
 * differs from the cached one. No-op unless powered with BR/EDR enabled
 * and outside the service-cache window.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the major class byte */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI command if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1035
1036 static bool get_connectable(struct hci_dev *hdev)
1037 {
1038 struct pending_cmd *cmd;
1039
1040 /* If there's a pending mgmt command the flag will not yet have
1041 * it's final value, so check for this first.
1042 */
1043 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1044 if (cmd) {
1045 struct mgmt_mode *cp = cmd->param;
1046 return cp->val;
1047 }
1048
1049 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1050 }
1051
1052 static void disable_advertising(struct hci_request *req)
1053 {
1054 u8 enable = 0x00;
1055
1056 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1057 }
1058
/* Queue the command sequence that (re)starts LE advertising on @req:
 * optionally a disable, then Set Advertising Parameters and Set
 * Advertise Enable. Bails out silently if an LE connection exists or a
 * usable own address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Advertising is not restarted while an LE link is up */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Parameters cannot be changed while advertising is active, so
	 * turn it off first.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1099
/* Delayed work run when the service cache window expires: pushes the
 * (possibly deferred) EIR and Class of Device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act on the transition from cached to uncached */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1120
/* Delayed work run when the resolvable private address times out: marks
 * the RPA expired and, if advertising is on, restarts it so a fresh RPA
 * gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1141
/* One-time mgmt setup for a controller, performed on the first mgmt
 * command that touches it. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
1157
/* Handler for the Read Controller Information command. Replies with the
 * controller's address, version, manufacturer, settings bitmasks, class
 * of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1187
/* Release a pending mgmt command: drop the socket reference and free
 * the copied parameters and the command itself. The command must
 * already be unlinked from hdev->mgmt_pending.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1194
1195 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1196 struct hci_dev *hdev, void *data,
1197 u16 len)
1198 {
1199 struct pending_cmd *cmd;
1200
1201 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1202 if (!cmd)
1203 return NULL;
1204
1205 cmd->opcode = opcode;
1206 cmd->index = hdev->id;
1207
1208 cmd->param = kmalloc(len, GFP_KERNEL);
1209 if (!cmd->param) {
1210 kfree(cmd);
1211 return NULL;
1212 }
1213
1214 if (data)
1215 memcpy(cmd->param, data, len);
1216
1217 cmd->sk = sk;
1218 sock_hold(sk);
1219
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1221
1222 return cmd;
1223 }
1224
/* Invoke @cb for every pending mgmt command on @hdev that matches
 * @opcode (an opcode of 0 matches all commands). The safe iterator
 * allows @cb to remove the command from the list.
 */
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}
1239
/* Unlink a pending command from its list and release it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1245
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1247 {
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1249
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 sizeof(settings));
1252 }
1253
/* Completion handler for the power-off cleanup request. If no
 * connections remain the power-off work is run immediately instead
 * of waiting for the disconnect timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1263
/* Append the HCI commands needed to stop any ongoing discovery
 * activity to @req.
 *
 * Returns true if at least one command was queued (i.e. discovery or
 * passive LE scanning was actually active), false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* BR/EDR inquiry and LE scanning are stopped with
		 * different commands depending on which is active.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305
/* Build and run an HCI request that quiesces the controller before
 * powering it off: disable page/inquiry scan and advertising, stop
 * discovery and tear down or reject every connection.
 *
 * Returns 0 on success, -ENODATA when no HCI commands were needed,
 * or another negative error from hci_req_run().
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Established links are disconnected, outgoing connection
	 * attempts are cancelled and incoming ones are rejected.
	 */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 u16 len)
1367 {
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1370 int err;
1371
1372 BT_DBG("request for %s", hdev->name);
1373
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1377
1378 hci_dev_lock(hdev);
1379
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 MGMT_STATUS_BUSY);
1383 goto failed;
1384 }
1385
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1388
1389 if (cp->val) {
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1391 data, len);
1392 err = mgmt_powered(hdev, 1);
1393 goto failed;
1394 }
1395 }
1396
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 goto failed;
1400 }
1401
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 if (!cmd) {
1404 err = -ENOMEM;
1405 goto failed;
1406 }
1407
1408 if (cp->val) {
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1410 err = 0;
1411 } else {
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1414 if (!err)
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1417
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1422 err = 0;
1423 }
1424 }
1425
1426 failed:
1427 hci_dev_unlock(hdev);
1428 return err;
1429 }
1430
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1432 {
1433 __le32 ev;
1434
1435 ev = cpu_to_le32(get_current_settings(hdev));
1436
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438 }
1439
/* Broadcast the New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444
/* Context passed through mgmt_pending_foreach() to settings_rsp(). */
struct cmd_lookup {
	struct sock *sk;	/* first answered socket; used as the skip
				 * argument for new_settings() */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1450
/* mgmt_pending_foreach() callback: reply with the current settings
 * and free the command. Remembers the first socket seen (with a held
 * reference) so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1466
/* mgmt_pending_foreach() callback: fail the command with *status
 * (a mgmt status code) and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1474
1475 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1476 {
1477 if (!lmp_bredr_capable(hdev))
1478 return MGMT_STATUS_NOT_SUPPORTED;
1479 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1480 return MGMT_STATUS_REJECTED;
1481 else
1482 return MGMT_STATUS_SUCCESS;
1483 }
1484
1485 static u8 mgmt_le_support(struct hci_dev *hdev)
1486 {
1487 if (!lmp_le_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1490 return MGMT_STATUS_REJECTED;
1491 else
1492 return MGMT_STATUS_SUCCESS;
1493 }
1494
/* HCI request completion for SET_DISCOVERABLE: sync the host flags,
 * arm the discoverable timeout, answer the pending command and emit
 * New Settings if anything changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the flag that set_discoverable() set optimistically
		 * before running the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the auto-disable timer stored by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1551
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * cp->val: 0x00 disable, 0x01 general, 0x02 limited discoverable.
 * cp->timeout: auto-disable time in seconds - required for limited
 * mode, forbidden when disabling.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is down */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* While powered off only the host flag is updated */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1716
1717 static void write_fast_connectable(struct hci_request *req, bool enable)
1718 {
1719 struct hci_dev *hdev = req->hdev;
1720 struct hci_cp_write_page_scan_activity acp;
1721 u8 type;
1722
1723 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1724 return;
1725
1726 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1727 return;
1728
1729 if (enable) {
1730 type = PAGE_SCAN_TYPE_INTERLACED;
1731
1732 /* 160 msec page scan interval */
1733 acp.interval = cpu_to_le16(0x0100);
1734 } else {
1735 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1736
1737 /* default 1.28 sec page scan */
1738 acp.interval = cpu_to_le16(0x0800);
1739 }
1740
1741 acp.window = cpu_to_le16(0x0012);
1742
1743 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1744 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1745 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1746 sizeof(acp), &acp);
1747
1748 if (hdev->page_scan_type != type)
1749 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1750 }
1751
/* HCI request completion for SET_CONNECTABLE: sync the host flags
 * with the result, answer the pending command and, on change, emit
 * New Settings and refresh advertising data / background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable implicitly disables
		 * discoverable as well.
		 */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1799
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1802 {
1803 bool changed = false;
1804 int err;
1805
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807 changed = true;
1808
1809 if (val) {
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811 } else {
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1814 }
1815
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817 if (err < 0)
1818 return err;
1819
1820 if (changed) {
1821 hci_update_background_scan(hdev);
1822 return new_settings(hdev, sk);
1823 }
1824
1825 return 0;
1826 }
1827
/* Handler for MGMT_OP_SET_CONNECTABLE: enable/disable page scanning
 * (BR/EDR) and, for LE, refresh the advertising state. While powered
 * off only the host flags are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Only touch scan enable when the state actually changes */
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* An armed discoverable timeout is pointless once
			 * inquiry scan goes away with page scan.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing needed to be sent, fall back to a pure
		 * settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1920
/* Handler for MGMT_OP_SET_PAIRABLE: toggle the HCI_PAIRABLE host
 * flag. This is a pure host-side setting; no HCI traffic is needed.
 */
static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1952
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link level
 * security (HCI Write Authentication Enable). While powered, the
 * change is sent to the controller and the reply comes from the
 * command-complete path; while powered off only the host flag is
 * updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: reply now */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2022
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * While powered the mode is written to the controller; disabling SSP
 * also clears the dependent High Speed setting and, when debug keys
 * are in use, SSP debug mode.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the host flags are updated */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* HS depends on SSP, so it goes down with it */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* No change requested: reply immediately */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode (cp->val is
	 * 0x00 here and doubles as the debug mode parameter).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2104
/* Handler for MGMT_OP_SET_HS: toggle the High Speed setting. This is
 * a host-side flag, but it requires SSP to be enabled, and disabling
 * it while the controller is powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2155
/* HCI request completion for SET_LE: answer all pending SET_LE
 * commands and, when LE ended up enabled, refresh the advertising
 * data and background scan state.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk (the first responder) is skipped for the broadcast */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2195
/* Handler for MGMT_OP_SET_LE: toggle LE host support. On powered
 * controllers this issues Write LE Host Supported (disabling also
 * turns advertising off first); otherwise only the host flags change.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the controller already matches: only the host
	 * flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot survive LE being disabled */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before LE support goes away */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2284
2285 /* This is a helper function to test for pending mgmt commands that can
2286 * cause CoD or EIR HCI commands. We can only allow one such pending
2287 * mgmt command at a time since otherwise we cannot easily track what
2288 * the current values are, will be, and based on that calculate if a new
2289 * HCI command needs to be sent and if yes with what value.
2290 */
2291 static bool pending_eir_or_class(struct hci_dev *hdev)
2292 {
2293 struct pending_cmd *cmd;
2294
2295 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2296 switch (cmd->opcode) {
2297 case MGMT_OP_ADD_UUID:
2298 case MGMT_OP_REMOVE_UUID:
2299 case MGMT_OP_SET_DEV_CLASS:
2300 case MGMT_OP_SET_POWERED:
2301 return true;
2302 }
2303 }
2304
2305 return false;
2306 }
2307
/* Bluetooth Base UUID in little-endian byte order. 16- and 32-bit
 * UUIDs are aliases of this base where only the four most significant
 * bytes (the last four here) differ - see get_uuid_size().
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2312
2313 static u8 get_uuid_size(const u8 *uuid)
2314 {
2315 u32 val;
2316
2317 if (memcmp(uuid, bluetooth_base_uuid, 12))
2318 return 128;
2319
2320 val = get_unaligned_le32(&uuid[12]);
2321 if (val > 0xffff)
2322 return 32;
2323
2324 return 16;
2325 }
2326
/* Common completion for class/EIR changing commands: reply to the
 * pending @mgmt_op with the (possibly updated) class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2345
/* HCI request completion for MGMT_OP_ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2352
/* Handler for MGMT_OP_ADD_UUID: record a new service UUID and push
 * the resulting class of device and EIR data to the controller. The
 * reply carries the device class and is sent either immediately
 * (when no HCI traffic was needed, i.e. -ENODATA) or from the
 * request completion.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing operation at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed; reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2410
2411 static bool enable_service_cache(struct hci_dev *hdev)
2412 {
2413 if (!hdev_is_powered(hdev))
2414 return false;
2415
2416 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2417 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2418 CACHE_TIMEOUT);
2419 return true;
2420 }
2421
2422 return false;
2423 }
2424
/* HCI request completion callback for Remove UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2431
/* Handler for the Remove UUID mgmt command. An all-zero UUID removes
 * every stored UUID; otherwise only exact matches are deleted. The
 * class of device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting operation may run at a time. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: wipe everything. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was just armed, the
		 * controller update is deferred; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands needed, reply immediately. */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until remove_uuid_complete() runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2509
/* HCI request completion callback for Set Device Class. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2516
/* Handler for the Set Device Class mgmt command (BR/EDR only). */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting operation may run at a time. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and three high bits of major must
	 * be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, only the stored values are updated; the
	 * controller is programmed on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Drop the lock around cancel_delayed_work_sync() - NOTE(review):
	 * presumably the service cache work takes hci_dev_lock itself,
	 * so holding it here could deadlock; confirm against hci_core.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands needed, reply immediately. */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until set_class_complete() runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2587
/* Handler for the Load Link Keys mgmt command: replace the entire set
 * of stored BR/EDR link keys with the ones supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which the total command still fits in
	 * a u16 length, guarding the expected_len computation below.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the stored keys. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2669
2670 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2671 u8 addr_type, struct sock *skip_sk)
2672 {
2673 struct mgmt_ev_device_unpaired ev;
2674
2675 bacpy(&ev.addr.bdaddr, bdaddr);
2676 ev.addr.type = addr_type;
2677
2678 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2679 skip_sk);
2680 }
2681
/* Handler for the Unpair Device mgmt command: remove stored keys for a
 * device and optionally disconnect it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BR/EDR devices carry a link key; LE devices have IRK, stored
	 * connection parameters and LTKs to remove.
	 */
	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down: reply and emit the event now. */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred until the disconnection completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2772
/* Handler for the Disconnect mgmt command: terminate an existing ACL or
 * LE connection with "Remote User Terminated Connection".
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	/* Reply is deferred until the disconnection completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2837
2838 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2839 {
2840 switch (link_type) {
2841 case LE_LINK:
2842 switch (addr_type) {
2843 case ADDR_LE_DEV_PUBLIC:
2844 return BDADDR_LE_PUBLIC;
2845
2846 default:
2847 /* Fallback to LE Random address type */
2848 return BDADDR_LE_RANDOM;
2849 }
2850
2851 default:
2852 /* Fallback to BR/EDR type */
2853 return BDADDR_BREDR;
2854 }
2855 }
2856
2857 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2858 u16 data_len)
2859 {
2860 struct mgmt_rp_get_connections *rp;
2861 struct hci_conn *c;
2862 size_t rp_len;
2863 int err;
2864 u16 i;
2865
2866 BT_DBG("");
2867
2868 hci_dev_lock(hdev);
2869
2870 if (!hdev_is_powered(hdev)) {
2871 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2872 MGMT_STATUS_NOT_POWERED);
2873 goto unlock;
2874 }
2875
2876 i = 0;
2877 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2878 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2879 i++;
2880 }
2881
2882 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2883 rp = kmalloc(rp_len, GFP_KERNEL);
2884 if (!rp) {
2885 err = -ENOMEM;
2886 goto unlock;
2887 }
2888
2889 i = 0;
2890 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2891 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2892 continue;
2893 bacpy(&rp->addr[i].bdaddr, &c->dst);
2894 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2895 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2896 continue;
2897 i++;
2898 }
2899
2900 rp->conn_count = cpu_to_le16(i);
2901
2902 /* Recalculate length in case of filtered SCO connections, etc */
2903 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2904
2905 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2906 rp_len);
2907
2908 kfree(rp);
2909
2910 unlock:
2911 hci_dev_unlock(hdev);
2912 return err;
2913 }
2914
/* Queue a HCI PIN Code Negative Reply and register a pending mgmt
 * command so the final response can be delivered asynchronously.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2933
/* Handler for the PIN Code Reply mgmt command. */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter
	 * is rejected with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Reply is deferred until the HCI command completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2993
/* Handler for the Set IO Capability mgmt command: store the IO
 * capability used for future pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}
3017
3018 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3019 {
3020 struct hci_dev *hdev = conn->hdev;
3021 struct pending_cmd *cmd;
3022
3023 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3024 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3025 continue;
3026
3027 if (cmd->user_data != conn)
3028 continue;
3029
3030 return cmd;
3031 }
3032
3033 return NULL;
3034 }
3035
/* Finish a pending Pair Device command: reply on the mgmt socket,
 * detach the pairing callbacks, drop the connection reference taken
 * when pairing started and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3056
3057 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3058 {
3059 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3060 struct pending_cmd *cmd;
3061
3062 cmd = find_pairing(conn);
3063 if (cmd)
3064 pairing_complete(cmd, status);
3065 }
3066
3067 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3068 {
3069 struct pending_cmd *cmd;
3070
3071 BT_DBG("status %u", status);
3072
3073 cmd = find_pairing(conn);
3074 if (!cmd)
3075 BT_DBG("Unable to find a pending command");
3076 else
3077 pairing_complete(cmd, mgmt_status(status));
3078 }
3079
3080 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3081 {
3082 struct pending_cmd *cmd;
3083
3084 BT_DBG("status %u", status);
3085
3086 if (!status)
3087 return;
3088
3089 cmd = find_pairing(conn);
3090 if (!cmd)
3091 BT_DBG("Unable to find a pending command");
3092 else
3093 pairing_complete(cmd, mgmt_status(status));
3094 }
3095
/* Handler for the Pair Device mgmt command: establish a connection
 * (ACL or LE) and initiate dedicated bonding on it.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* An existing connect callback means another pairing attempt is
	 * already using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately. */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3214
/* Handler for the Cancel Pair Device mgmt command: abort the pending
 * Pair Device command for the given address, if it matches.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device as the pending pair. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3256
/* Common helper for the various user pairing responses (PIN, confirm,
 * passkey, and their negative forms). LE responses are routed through
 * SMP; BR/EDR responses are sent as HCI commands.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE: hand the response to SMP and reply synchronously. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	/* BR/EDR: the reply is deferred until the HCI command completes. */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3324
/* Handler for the PIN Code Negative Reply mgmt command. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3336
/* Handler for the User Confirmation Reply mgmt command. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command carries no variable-length payload. */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3352
/* Handler for the User Confirmation Negative Reply mgmt command. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3364
/* Handler for the User Passkey Reply mgmt command. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3376
/* Handler for the User Passkey Negative Reply mgmt command. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3388
/* Queue a Write Local Name HCI command with the currently stored name. */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3398
/* HCI request completion callback for Set Local Name: deliver the
 * deferred mgmt response.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3426
/* Handler for the Set Local Name mgmt command. */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, only store the name and notify listeners;
	 * the controller is programmed on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Reply is deferred until set_name_complete() runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3495
/* Handler for the Read Local OOB Data mgmt command. Uses the extended
 * HCI variant when Secure Connections is enabled.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3543
/* Handler for the Add Remote OOB Data mgmt command. The command length
 * selects between the legacy (192-bit only) and the extended form that
 * additionally carries 256-bit hash/randomizer values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3591
3592 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3593 void *data, u16 len)
3594 {
3595 struct mgmt_cp_remove_remote_oob_data *cp = data;
3596 u8 status;
3597 int err;
3598
3599 BT_DBG("%s", hdev->name);
3600
3601 hci_dev_lock(hdev);
3602
3603 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3604 if (err < 0)
3605 status = MGMT_STATUS_INVALID_PARAMS;
3606 else
3607 status = MGMT_STATUS_SUCCESS;
3608
3609 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3610 status, &cp->addr, sizeof(cp->addr));
3611
3612 hci_dev_unlock(hdev);
3613 return err;
3614 }
3615
3616 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3617 {
3618 struct pending_cmd *cmd;
3619 u8 type;
3620 int err;
3621
3622 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3623
3624 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3625 if (!cmd)
3626 return -ENOENT;
3627
3628 type = hdev->discovery.type;
3629
3630 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3631 &type, sizeof(type));
3632 mgmt_pending_remove(cmd);
3633
3634 return err;
3635 }
3636
3637 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3638 {
3639 unsigned long timeout = 0;
3640
3641 BT_DBG("status %d", status);
3642
3643 if (status) {
3644 hci_dev_lock(hdev);
3645 mgmt_start_discovery_failed(hdev, status);
3646 hci_dev_unlock(hdev);
3647 return;
3648 }
3649
3650 hci_dev_lock(hdev);
3651 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3652 hci_dev_unlock(hdev);
3653
3654 switch (hdev->discovery.type) {
3655 case DISCOV_TYPE_LE:
3656 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3657 break;
3658
3659 case DISCOV_TYPE_INTERLEAVED:
3660 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3661 break;
3662
3663 case DISCOV_TYPE_BREDR:
3664 break;
3665
3666 default:
3667 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3668 }
3669
3670 if (!timeout)
3671 return;
3672
3673 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3674 }
3675
/* Handle MGMT_OP_START_DISCOVERY.
 *
 * Validates the requested discovery type, builds the matching HCI
 * request (inquiry for BR/EDR; active LE scan for LE and interleaved)
 * and submits it. On success the discovery state machine moves to
 * DISCOVERY_STARTING; the final result is delivered to user space
 * from start_discovery_complete() once the controller has answered.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and an already-active discovery both block a
	 * new discovery request.
	 */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Track the command so the completion callback can reply to the
	 * originating socket later.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Submit the assembled request; on submission failure the
	 * pending command must be dropped since no callback will fire.
	 */
	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3834
3835 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3836 {
3837 struct pending_cmd *cmd;
3838 int err;
3839
3840 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3841 if (!cmd)
3842 return -ENOENT;
3843
3844 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3845 &hdev->discovery.type, sizeof(hdev->discovery.type));
3846 mgmt_pending_remove(cmd);
3847
3848 return err;
3849 }
3850
3851 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3852 {
3853 BT_DBG("status %d", status);
3854
3855 hci_dev_lock(hdev);
3856
3857 if (status) {
3858 mgmt_stop_discovery_failed(hdev, status);
3859 goto unlock;
3860 }
3861
3862 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3863
3864 unlock:
3865 hci_dev_unlock(hdev);
3866 }
3867
/* Handle MGMT_OP_STOP_DISCOVERY.
 *
 * Rejects the request if discovery is not active or the type does not
 * match the running discovery. Otherwise builds and submits the HCI
 * commands needed to stop discovery; the reply is normally sent from
 * stop_discovery_complete(). If no HCI commands were needed (-ENODATA)
 * the command is completed immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type in the request must match the discovery that is
	 * actually running.
	 */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	/* Request submission failed; the completion callback will never
	 * run, so drop the pending command here.
	 */
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3923
3924 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3925 u16 len)
3926 {
3927 struct mgmt_cp_confirm_name *cp = data;
3928 struct inquiry_entry *e;
3929 int err;
3930
3931 BT_DBG("%s", hdev->name);
3932
3933 hci_dev_lock(hdev);
3934
3935 if (!hci_discovery_active(hdev)) {
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3937 MGMT_STATUS_FAILED, &cp->addr,
3938 sizeof(cp->addr));
3939 goto failed;
3940 }
3941
3942 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3943 if (!e) {
3944 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3945 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3946 sizeof(cp->addr));
3947 goto failed;
3948 }
3949
3950 if (cp->name_known) {
3951 e->name_state = NAME_KNOWN;
3952 list_del(&e->list);
3953 } else {
3954 e->name_state = NAME_NEEDED;
3955 hci_inquiry_cache_update_resolve(hdev, e);
3956 }
3957
3958 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3959 sizeof(cp->addr));
3960
3961 failed:
3962 hci_dev_unlock(hdev);
3963 return err;
3964 }
3965
3966 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3967 u16 len)
3968 {
3969 struct mgmt_cp_block_device *cp = data;
3970 u8 status;
3971 int err;
3972
3973 BT_DBG("%s", hdev->name);
3974
3975 if (!bdaddr_type_is_valid(cp->addr.type))
3976 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3977 MGMT_STATUS_INVALID_PARAMS,
3978 &cp->addr, sizeof(cp->addr));
3979
3980 hci_dev_lock(hdev);
3981
3982 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3983 cp->addr.type);
3984 if (err < 0) {
3985 status = MGMT_STATUS_FAILED;
3986 goto done;
3987 }
3988
3989 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3990 sk);
3991 status = MGMT_STATUS_SUCCESS;
3992
3993 done:
3994 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3995 &cp->addr, sizeof(cp->addr));
3996
3997 hci_dev_unlock(hdev);
3998
3999 return err;
4000 }
4001
4002 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4003 u16 len)
4004 {
4005 struct mgmt_cp_unblock_device *cp = data;
4006 u8 status;
4007 int err;
4008
4009 BT_DBG("%s", hdev->name);
4010
4011 if (!bdaddr_type_is_valid(cp->addr.type))
4012 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4013 MGMT_STATUS_INVALID_PARAMS,
4014 &cp->addr, sizeof(cp->addr));
4015
4016 hci_dev_lock(hdev);
4017
4018 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4019 cp->addr.type);
4020 if (err < 0) {
4021 status = MGMT_STATUS_INVALID_PARAMS;
4022 goto done;
4023 }
4024
4025 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4026 sk);
4027 status = MGMT_STATUS_SUCCESS;
4028
4029 done:
4030 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4031 &cp->addr, sizeof(cp->addr));
4032
4033 hci_dev_unlock(hdev);
4034
4035 return err;
4036 }
4037
4038 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4039 u16 len)
4040 {
4041 struct mgmt_cp_set_device_id *cp = data;
4042 struct hci_request req;
4043 int err;
4044 __u16 source;
4045
4046 BT_DBG("%s", hdev->name);
4047
4048 source = __le16_to_cpu(cp->source);
4049
4050 if (source > 0x0002)
4051 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4052 MGMT_STATUS_INVALID_PARAMS);
4053
4054 hci_dev_lock(hdev);
4055
4056 hdev->devid_source = source;
4057 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4058 hdev->devid_product = __le16_to_cpu(cp->product);
4059 hdev->devid_version = __le16_to_cpu(cp->version);
4060
4061 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4062
4063 hci_req_init(&req, hdev);
4064 update_eir(&req);
4065 hci_req_run(&req, NULL);
4066
4067 hci_dev_unlock(hdev);
4068
4069 return err;
4070 }
4071
4072 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4073 {
4074 struct cmd_lookup match = { NULL, hdev };
4075
4076 if (status) {
4077 u8 mgmt_err = mgmt_status(status);
4078
4079 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4080 cmd_status_rsp, &mgmt_err);
4081 return;
4082 }
4083
4084 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4085 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4086 else
4087 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4088
4089 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4090 &match);
4091
4092 new_settings(hdev, match.sk);
4093
4094 if (match.sk)
4095 sock_put(match.sk);
4096 }
4097
/* Handle MGMT_OP_SET_ADVERTISING.
 *
 * When the adapter is powered off, the value is unchanged, an LE link
 * exists, or an active LE scan is running, the flag is just toggled
 * and a reply sent directly. Otherwise an HCI request to enable or
 * disable advertising is submitted and set_advertising_complete()
 * finishes the command.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse to race with an in-flight SET_ADVERTISING or SET_LE. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4177
4178 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4179 void *data, u16 len)
4180 {
4181 struct mgmt_cp_set_static_address *cp = data;
4182 int err;
4183
4184 BT_DBG("%s", hdev->name);
4185
4186 if (!lmp_le_capable(hdev))
4187 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4188 MGMT_STATUS_NOT_SUPPORTED);
4189
4190 if (hdev_is_powered(hdev))
4191 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4192 MGMT_STATUS_REJECTED);
4193
4194 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4195 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4196 return cmd_status(sk, hdev->id,
4197 MGMT_OP_SET_STATIC_ADDRESS,
4198 MGMT_STATUS_INVALID_PARAMS);
4199
4200 /* Two most significant bits shall be set */
4201 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4202 return cmd_status(sk, hdev->id,
4203 MGMT_OP_SET_STATIC_ADDRESS,
4204 MGMT_STATUS_INVALID_PARAMS);
4205 }
4206
4207 hci_dev_lock(hdev);
4208
4209 bacpy(&hdev->static_addr, &cp->bdaddr);
4210
4211 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4212
4213 hci_dev_unlock(hdev);
4214
4215 return err;
4216 }
4217
4218 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4219 void *data, u16 len)
4220 {
4221 struct mgmt_cp_set_scan_params *cp = data;
4222 __u16 interval, window;
4223 int err;
4224
4225 BT_DBG("%s", hdev->name);
4226
4227 if (!lmp_le_capable(hdev))
4228 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4229 MGMT_STATUS_NOT_SUPPORTED);
4230
4231 interval = __le16_to_cpu(cp->interval);
4232
4233 if (interval < 0x0004 || interval > 0x4000)
4234 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4235 MGMT_STATUS_INVALID_PARAMS);
4236
4237 window = __le16_to_cpu(cp->window);
4238
4239 if (window < 0x0004 || window > 0x4000)
4240 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4241 MGMT_STATUS_INVALID_PARAMS);
4242
4243 if (window > interval)
4244 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4245 MGMT_STATUS_INVALID_PARAMS);
4246
4247 hci_dev_lock(hdev);
4248
4249 hdev->le_scan_interval = interval;
4250 hdev->le_scan_window = window;
4251
4252 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4253
4254 /* If background scan is running, restart it so new parameters are
4255 * loaded.
4256 */
4257 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4258 hdev->discovery.state == DISCOVERY_STOPPED) {
4259 struct hci_request req;
4260
4261 hci_req_init(&req, hdev);
4262
4263 hci_req_add_le_scan_disable(&req);
4264 hci_req_add_le_passive_scan(&req);
4265
4266 hci_req_run(&req, NULL);
4267 }
4268
4269 hci_dev_unlock(hdev);
4270
4271 return err;
4272 }
4273
4274 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4275 {
4276 struct pending_cmd *cmd;
4277
4278 BT_DBG("status 0x%02x", status);
4279
4280 hci_dev_lock(hdev);
4281
4282 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4283 if (!cmd)
4284 goto unlock;
4285
4286 if (status) {
4287 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4288 mgmt_status(status));
4289 } else {
4290 struct mgmt_mode *cp = cmd->param;
4291
4292 if (cp->val)
4293 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4294 else
4295 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4296
4297 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4298 new_settings(hdev, cmd->sk);
4299 }
4300
4301 mgmt_pending_remove(cmd);
4302
4303 unlock:
4304 hci_dev_unlock(hdev);
4305 }
4306
/* Handle MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR enabled, controller version >= 1.2, a powered
 * adapter and connectable mode. Submits a page scan parameter change;
 * fast_connectable_complete() updates the flag and replies once the
 * controller has confirmed.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op request: just confirm the current settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	/* On submission failure no callback runs, so reply and drop the
	 * pending command here.
	 */
	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4371
4372 static void set_bredr_scan(struct hci_request *req)
4373 {
4374 struct hci_dev *hdev = req->hdev;
4375 u8 scan = 0;
4376
4377 /* Ensure that fast connectable is disabled. This function will
4378 * not do anything if the page scan parameters are already what
4379 * they should be.
4380 */
4381 write_fast_connectable(req, false);
4382
4383 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4384 !list_empty(&hdev->whitelist))
4385 scan |= SCAN_PAGE;
4386 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4387 scan |= SCAN_INQUIRY;
4388
4389 if (scan)
4390 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4391 }
4392
4393 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4394 {
4395 struct pending_cmd *cmd;
4396
4397 BT_DBG("status 0x%02x", status);
4398
4399 hci_dev_lock(hdev);
4400
4401 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4402 if (!cmd)
4403 goto unlock;
4404
4405 if (status) {
4406 u8 mgmt_err = mgmt_status(status);
4407
4408 /* We need to restore the flag if related HCI commands
4409 * failed.
4410 */
4411 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4412
4413 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4414 } else {
4415 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4416 new_settings(hdev, cmd->sk);
4417 }
4418
4419 mgmt_pending_remove(cmd);
4420
4421 unlock:
4422 hci_dev_unlock(hdev);
4423 }
4424
/* Handle MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR on a dual-mode (LE + BR/EDR) controller.
 * While powered off the flag is simply toggled; while powered on only
 * enabling is allowed, and it is done by submitting the scan-enable
 * and advertising-data updates via an HCI request completed in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every setting that only
		 * makes sense with BR/EDR present.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4515
/* Handle MGMT_OP_SET_SECURE_CONN.
 *
 * Accepts 0x00 (off), 0x01 (on) and 0x02 (Secure Connections only).
 * While powered off the HCI_SC_ENABLED/HCI_SC_ONLY flags are toggled
 * directly; while powered on a Write Secure Connections Host Support
 * command is sent and the reply is finished from the command-complete
 * path.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC is a debugfs override for controllers without
	 * native SC support.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No-op request: current flags already match. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is a host-side policy and takes effect as soon
	 * as the command was queued.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4603
4604 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4605 void *data, u16 len)
4606 {
4607 struct mgmt_mode *cp = data;
4608 bool changed, use_changed;
4609 int err;
4610
4611 BT_DBG("request for %s", hdev->name);
4612
4613 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4614 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4615 MGMT_STATUS_INVALID_PARAMS);
4616
4617 hci_dev_lock(hdev);
4618
4619 if (cp->val)
4620 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4621 &hdev->dev_flags);
4622 else
4623 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4624 &hdev->dev_flags);
4625
4626 if (cp->val == 0x02)
4627 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4628 &hdev->dev_flags);
4629 else
4630 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4631 &hdev->dev_flags);
4632
4633 if (hdev_is_powered(hdev) && use_changed &&
4634 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4635 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4636 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4637 sizeof(mode), &mode);
4638 }
4639
4640 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4641 if (err < 0)
4642 goto unlock;
4643
4644 if (changed)
4645 err = new_settings(hdev, sk);
4646
4647 unlock:
4648 hci_dev_unlock(hdev);
4649 return err;
4650 }
4651
4652 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4653 u16 len)
4654 {
4655 struct mgmt_cp_set_privacy *cp = cp_data;
4656 bool changed;
4657 int err;
4658
4659 BT_DBG("request for %s", hdev->name);
4660
4661 if (!lmp_le_capable(hdev))
4662 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4663 MGMT_STATUS_NOT_SUPPORTED);
4664
4665 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4666 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4667 MGMT_STATUS_INVALID_PARAMS);
4668
4669 if (hdev_is_powered(hdev))
4670 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4671 MGMT_STATUS_REJECTED);
4672
4673 hci_dev_lock(hdev);
4674
4675 /* If user space supports this command it is also expected to
4676 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4677 */
4678 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4679
4680 if (cp->privacy) {
4681 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4682 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4683 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4684 } else {
4685 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4686 memset(hdev->irk, 0, sizeof(hdev->irk));
4687 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4688 }
4689
4690 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4691 if (err < 0)
4692 goto unlock;
4693
4694 if (changed)
4695 err = new_settings(hdev, sk);
4696
4697 unlock:
4698 hci_dev_unlock(hdev);
4699 return err;
4700 }
4701
4702 static bool irk_is_valid(struct mgmt_irk_info *irk)
4703 {
4704 switch (irk->addr.type) {
4705 case BDADDR_LE_PUBLIC:
4706 return true;
4707
4708 case BDADDR_LE_RANDOM:
4709 /* Two most significant bits shall be set */
4710 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4711 return false;
4712 return true;
4713 }
4714
4715 return false;
4716 }
4717
/* Handle MGMT_OP_LOAD_IRKS.
 *
 * Validates the count against the command length and each entry's
 * address, then atomically replaces the stored IRK list (existing
 * entries are cleared first). Also enables HCI_RPA_RESOLVING since a
 * caller using this command is expected to handle IRKs.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload;
	 * protects the expected_len computation from overflow.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry up front so the existing list is only
	 * cleared when the whole request is acceptable.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4784
4785 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4786 {
4787 if (key->master != 0x00 && key->master != 0x01)
4788 return false;
4789
4790 switch (key->addr.type) {
4791 case BDADDR_LE_PUBLIC:
4792 return true;
4793
4794 case BDADDR_LE_RANDOM:
4795 /* Two most significant bits shall be set */
4796 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4797 return false;
4798 return true;
4799 }
4800
4801 return false;
4802 }
4803
/* Load LE Long Term Keys supplied by userspace, replacing the SMP
 * layer's current set.
 *
 * All entries are validated before the existing keys are cleared.
 * Entries with an unknown authentication type are silently skipped
 * rather than failing the whole request.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Cap the count so expected_len below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole request if any single entry is malformed */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Replace, not merge: the provided list is the complete set */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		/* Map the mgmt address type onto the HCI address type */
		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Role the key is used in: master (initiator) or slave */
		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Unknown authentication types are skipped, not
			 * treated as a fatal error.
			 */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4887
/* Match context handed to get_conn_info_complete() when walking the
 * list of pending Get Connection Information commands.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the completed refresh is for */
	bool valid_tx_power;	/* TX power fields in conn are usable */
	u8 mgmt_status;		/* status to report back to userspace */
};
4893
/* mgmt_pending_foreach() callback: reply to a pending Get Connection
 * Information command once the RSSI/TX-power refresh has finished.
 *
 * Only commands whose hci_conn matches the one the refresh ran for
 * are answered; others are left pending. Consumes the command and
 * drops the connection reference taken by get_conn_info().
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power is only reported when the refresh request
		 * actually managed to read it; otherwise the invalid
		 * marker value is returned.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold() done when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4929
/* HCI request completion handler for the RSSI/TX-power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last
 * sent command and answers all matching pending mgmt commands.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4987
/* Get Connection Information command handler.
 *
 * Replies immediately from the values cached in the hci_conn if they
 * are fresh enough; otherwise builds an HCI request to re-read RSSI
 * (and TX power when needed) and defers the reply to
 * conn_info_refresh_complete() via a pending command.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* NOTE(review): the request is started before the pending
		 * command is queued; if mgmt_pending_add() then fails the
		 * in-flight refresh completes without a matching command,
		 * which is harmless but wasted work — confirm if reordering
		 * is worthwhile.
		 */
		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Reference is dropped in get_conn_info_complete() */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5099
/* HCI request completion handler for get_clock_info(). Looks up the
 * pending command via the connection (or NULL for a local-only query)
 * and sends the clock values back to userspace.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested last, so recover the hci_conn from the handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure only the (zeroed) address is reported back */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Balance the hci_conn_hold() taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5152
5153 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5154 u16 len)
5155 {
5156 struct mgmt_cp_get_clock_info *cp = data;
5157 struct mgmt_rp_get_clock_info rp;
5158 struct hci_cp_read_clock hci_cp;
5159 struct pending_cmd *cmd;
5160 struct hci_request req;
5161 struct hci_conn *conn;
5162 int err;
5163
5164 BT_DBG("%s", hdev->name);
5165
5166 memset(&rp, 0, sizeof(rp));
5167 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5168 rp.addr.type = cp->addr.type;
5169
5170 if (cp->addr.type != BDADDR_BREDR)
5171 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5172 MGMT_STATUS_INVALID_PARAMS,
5173 &rp, sizeof(rp));
5174
5175 hci_dev_lock(hdev);
5176
5177 if (!hdev_is_powered(hdev)) {
5178 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5179 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5180 goto unlock;
5181 }
5182
5183 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5184 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5185 &cp->addr.bdaddr);
5186 if (!conn || conn->state != BT_CONNECTED) {
5187 err = cmd_complete(sk, hdev->id,
5188 MGMT_OP_GET_CLOCK_INFO,
5189 MGMT_STATUS_NOT_CONNECTED,
5190 &rp, sizeof(rp));
5191 goto unlock;
5192 }
5193 } else {
5194 conn = NULL;
5195 }
5196
5197 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5198 if (!cmd) {
5199 err = -ENOMEM;
5200 goto unlock;
5201 }
5202
5203 hci_req_init(&req, hdev);
5204
5205 memset(&hci_cp, 0, sizeof(hci_cp));
5206 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5207
5208 if (conn) {
5209 hci_conn_hold(conn);
5210 cmd->user_data = conn;
5211
5212 hci_cp.handle = cpu_to_le16(conn->handle);
5213 hci_cp.which = 0x01; /* Piconet clock */
5214 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5215 }
5216
5217 err = hci_req_run(&req, get_clock_info_complete);
5218 if (err < 0)
5219 mgmt_pending_remove(cmd);
5220
5221 unlock:
5222 hci_dev_unlock(hdev);
5223 return err;
5224 }
5225
5226 /* Helper for Add/Remove Device commands */
5227 static void update_page_scan(struct hci_dev *hdev, u8 scan)
5228 {
5229 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5230 return;
5231
5232 if (!hdev_is_powered(hdev))
5233 return;
5234
5235 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5236 * make any changes to page scanning.
5237 */
5238 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5239 return;
5240
5241 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5242 scan |= SCAN_INQUIRY;
5243
5244 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5245 }
5246
5247 static void device_added(struct sock *sk, struct hci_dev *hdev,
5248 bdaddr_t *bdaddr, u8 type, u8 action)
5249 {
5250 struct mgmt_ev_device_added ev;
5251
5252 bacpy(&ev.addr.bdaddr, bdaddr);
5253 ev.addr.type = type;
5254 ev.action = action;
5255
5256 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5257 }
5258
/* Add Device command handler.
 *
 * For BR/EDR addresses the device is put on the whitelist (only the
 * "connect" action is supported) and page scanning is enabled when the
 * list was previously empty. For LE addresses the action selects the
 * auto-connect policy stored in the connection parameters.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan for device, 0x01 = connect to device */
	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		bool update_scan;

		/* Only "connect" action supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Page scan only needs enabling on the first entry */
		update_scan = list_empty(&hdev->whitelist);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		if (update_scan)
			update_page_scan(hdev, SCAN_PAGE);

		goto added;
	}

	/* LE path: map mgmt address type onto the HCI address type */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5336
5337 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5338 bdaddr_t *bdaddr, u8 type)
5339 {
5340 struct mgmt_ev_device_removed ev;
5341
5342 bacpy(&ev.addr.bdaddr, bdaddr);
5343 ev.addr.type = type;
5344
5345 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5346 }
5347
/* Remove Device command handler.
 *
 * A specific address removes a single whitelist entry (BR/EDR) or one
 * set of LE connection parameters. BDADDR_ANY with address type 0
 * clears the whole whitelist and all non-disabled LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan is no longer needed once the last
			 * whitelist entry is gone.
			 */
			if (list_empty(&hdev->whitelist))
				update_page_scan(hdev, SCAN_DISABLED);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		/* LE path: map mgmt address type onto the HCI one */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * removing them here would be inconsistent.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		update_page_scan(hdev, SCAN_DISABLED);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kept; see above */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5457
/* Load Connection Parameters command handler.
 *
 * Replaces all disabled (i.e. not Add-Device-managed) LE connection
 * parameter entries with the supplied list. Individual invalid entries
 * are skipped with an error log rather than failing the request.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Cap the count so expected_len below cannot overflow u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense here; others are skipped */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5542
/* Set External Configuration command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * external-config quirk. If the toggle transitions the controller
 * between configured and unconfigured state, the index is removed and
 * re-announced under its new role.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* NOTE(review): comparing test_bit() (int) with is_configured()
	 * (bool) — a true result means the UNCONFIGURED flag no longer
	 * matches reality, i.e. the state just transitioned. Confirm
	 * both return strictly 0/1 when changing either helper.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Became configured: run power-on setup */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5600
/* Set Public Address command handler.
 *
 * Stores the address to be programmed by the driver's set_bdaddr hook.
 * If the controller thereby becomes fully configured, the unconfigured
 * index is removed and power-on setup is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver hook the address can never be programmed */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* The controller is now fully configured: re-announce it
		 * as a regular index and run power-on setup.
		 */
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5652
/* Command dispatch table indexed by mgmt opcode (see mgmt.h).
 * Entries with var_len accept payloads of at least data_len bytes
 * (trailing variable-size data); fixed-size entries require an exact
 * length match. Order must track the MGMT_OP_* numbering.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* payload may exceed data_len */
	size_t data_len;	/* minimum (var_len) or exact size */
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5718
/* Entry point for mgmt messages arriving on an HCI control socket.
 *
 * Copies the message from the iovec, validates the header, resolves
 * and sanity-checks the controller index, then dispatches to the
 * handler in mgmt_handlers[] after checking the payload length.
 * Returns the consumed message length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must account for the entire payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config, or claimed by a user
		 * channel, are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ...and per-controller commands must carry one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the whole message as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5828
5829 void mgmt_index_added(struct hci_dev *hdev)
5830 {
5831 if (hdev->dev_type != HCI_BREDR)
5832 return;
5833
5834 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5835 return;
5836
5837 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5838 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5839 else
5840 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5841 }
5842
5843 void mgmt_index_removed(struct hci_dev *hdev)
5844 {
5845 u8 status = MGMT_STATUS_INVALID_INDEX;
5846
5847 if (hdev->dev_type != HCI_BREDR)
5848 return;
5849
5850 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5851 return;
5852
5853 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5854
5855 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5856 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5857 else
5858 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5859 }
5860
5861 /* This function requires the caller holds hdev->lock */
5862 static void restart_le_actions(struct hci_dev *hdev)
5863 {
5864 struct hci_conn_params *p;
5865
5866 list_for_each_entry(p, &hdev->le_conn_params, list) {
5867 /* Needed for AUTO_OFF case where might not "really"
5868 * have been powered off.
5869 */
5870 list_del_init(&p->action);
5871
5872 switch (p->auto_connect) {
5873 case HCI_AUTO_CONN_ALWAYS:
5874 list_add(&p->action, &hdev->pend_le_conns);
5875 break;
5876 case HCI_AUTO_CONN_REPORT:
5877 list_add(&p->action, &hdev->pend_le_reports);
5878 break;
5879 default:
5880 break;
5881 }
5882 }
5883
5884 hci_update_background_scan(hdev);
5885 }
5886
/* hci_req_run() callback for powered_update_hci(): re-arm stored LE
 * connection actions, answer pending Set Powered commands and emit
 * New Settings to everyone except the originating socket.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	/* settings_rsp stores the command's socket in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
5906
/* Build and submit the HCI command sequence that brings the controller
 * in sync with the mgmt settings after power on. Returns the result of
 * hci_req_run(): 0 when commands were queued (powered_complete() will
 * run later), negative when there was nothing to do or an error.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt wants it but the host
	 * feature bit is not set yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5966
/* Emit the mgmt responses/events for a controller power state change.
 * On power on this kicks off powered_update_hci() and, when commands
 * got queued, defers the New Settings event to their completion. On
 * power off all pending commands are failed and a zeroed Class of
 * Device is signalled if one was set.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means commands were queued; New Settings will be
		 * sent from powered_complete() instead of here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail every other pending command with Not Powered */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6001
6002 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6003 {
6004 struct pending_cmd *cmd;
6005 u8 status;
6006
6007 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6008 if (!cmd)
6009 return;
6010
6011 if (err == -ERFKILL)
6012 status = MGMT_STATUS_RFKILLED;
6013 else
6014 status = MGMT_STATUS_FAILED;
6015
6016 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6017
6018 mgmt_pending_remove(cmd);
6019 }
6020
/* Timer callback for the discoverable timeout: clear the discoverable
 * flags, restore plain page scan on BR/EDR, refresh class and
 * advertising data, then emit New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan, drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6051
6052 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6053 bool persistent)
6054 {
6055 struct mgmt_ev_new_link_key ev;
6056
6057 memset(&ev, 0, sizeof(ev));
6058
6059 ev.store_hint = persistent;
6060 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6061 ev.key.addr.type = BDADDR_BREDR;
6062 ev.key.type = key->type;
6063 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6064 ev.key.pin_len = key->pin_len;
6065
6066 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6067 }
6068
6069 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6070 {
6071 if (ltk->authenticated)
6072 return MGMT_LTK_AUTHENTICATED;
6073
6074 return MGMT_LTK_UNAUTHENTICATED;
6075 }
6076
6077 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6078 {
6079 struct mgmt_ev_new_long_term_key ev;
6080
6081 memset(&ev, 0, sizeof(ev));
6082
6083 /* Devices using resolvable or non-resolvable random addresses
6084 * without providing an indentity resolving key don't require
6085 * to store long term keys. Their addresses will change the
6086 * next time around.
6087 *
6088 * Only when a remote device provides an identity address
6089 * make sure the long term key is stored. If the remote
6090 * identity is known, the long term keys are internally
6091 * mapped to the identity address. So allow static random
6092 * and public addresses here.
6093 */
6094 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6095 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6096 ev.store_hint = 0x00;
6097 else
6098 ev.store_hint = persistent;
6099
6100 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6101 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6102 ev.key.type = mgmt_ltk_type(key);
6103 ev.key.enc_size = key->enc_size;
6104 ev.key.ediv = key->ediv;
6105 ev.key.rand = key->rand;
6106
6107 if (key->type == SMP_LTK)
6108 ev.key.master = 1;
6109
6110 memcpy(ev.key.val, key->val, sizeof(key->val));
6111
6112 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6113 }
6114
6115 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6116 {
6117 struct mgmt_ev_new_irk ev;
6118
6119 memset(&ev, 0, sizeof(ev));
6120
6121 /* For identity resolving keys from devices that are already
6122 * using a public address or static random address, do not
6123 * ask for storing this key. The identity resolving key really
6124 * is only mandatory for devices using resovlable random
6125 * addresses.
6126 *
6127 * Storing all identity resolving keys has the downside that
6128 * they will be also loaded on next boot of they system. More
6129 * identity resolving keys, means more time during scanning is
6130 * needed to actually resolve these addresses.
6131 */
6132 if (bacmp(&irk->rpa, BDADDR_ANY))
6133 ev.store_hint = 0x01;
6134 else
6135 ev.store_hint = 0x00;
6136
6137 bacpy(&ev.rpa, &irk->rpa);
6138 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6139 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6140 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6141
6142 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6143 }
6144
6145 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6146 bool persistent)
6147 {
6148 struct mgmt_ev_new_csrk ev;
6149
6150 memset(&ev, 0, sizeof(ev));
6151
6152 /* Devices using resolvable or non-resolvable random addresses
6153 * without providing an indentity resolving key don't require
6154 * to store signature resolving keys. Their addresses will change
6155 * the next time around.
6156 *
6157 * Only when a remote device provides an identity address
6158 * make sure the signature resolving key is stored. So allow
6159 * static random and public addresses here.
6160 */
6161 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6162 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6163 ev.store_hint = 0x00;
6164 else
6165 ev.store_hint = persistent;
6166
6167 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6168 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6169 ev.key.master = csrk->master;
6170 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6171
6172 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6173 }
6174
/* Send a New Connection Parameter event for an LE peer. Parameters are
 * only reported for identity addresses since anything else cannot be
 * matched to a stored device.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
6195
6196 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6197 u8 data_len)
6198 {
6199 eir[eir_len++] = sizeof(type) + data_len;
6200 eir[eir_len++] = type;
6201 memcpy(&eir[eir_len], data, data_len);
6202 eir_len += data_len;
6203
6204 return eir_len;
6205 }
6206
/* Send the Device Connected event, embedding the remote name and the
 * class of device (when known and non-zero) as EIR fields after the
 * fixed-size event header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include a class of device that is actually set */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6233
6234 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6235 {
6236 struct mgmt_cp_disconnect *cp = cmd->param;
6237 struct sock **sk = data;
6238 struct mgmt_rp_disconnect rp;
6239
6240 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6241 rp.addr.type = cp->addr.type;
6242
6243 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6244 sizeof(rp));
6245
6246 *sk = cmd->sk;
6247 sock_hold(*sk);
6248
6249 mgmt_pending_remove(cmd);
6250 }
6251
6252 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6253 {
6254 struct hci_dev *hdev = data;
6255 struct mgmt_cp_unpair_device *cp = cmd->param;
6256 struct mgmt_rp_unpair_device rp;
6257
6258 memset(&rp, 0, sizeof(rp));
6259 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6260 rp.addr.type = cp->addr.type;
6261
6262 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6263
6264 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6265
6266 mgmt_pending_remove(cmd);
6267 }
6268
/* Handle a dropped connection: possibly resume a deferred power off,
 * answer outstanding Disconnect/Unpair commands and send the Device
 * Disconnected event for ACL/LE links known to userspace.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Connections never announced to userspace need no event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes a pending Disconnect command; disconnect_rsp leaves
	 * the command's socket in sk, which is then passed to mgmt_event
	 * below — presumably to skip echoing the event back to that
	 * socket (NOTE: confirm against mgmt_event's last parameter).
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6310
6311 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6312 u8 link_type, u8 addr_type, u8 status)
6313 {
6314 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6315 struct mgmt_cp_disconnect *cp;
6316 struct mgmt_rp_disconnect rp;
6317 struct pending_cmd *cmd;
6318
6319 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6320 hdev);
6321
6322 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6323 if (!cmd)
6324 return;
6325
6326 cp = cmd->param;
6327
6328 if (bacmp(bdaddr, &cp->addr.bdaddr))
6329 return;
6330
6331 if (cp->addr.type != bdaddr_type)
6332 return;
6333
6334 bacpy(&rp.addr.bdaddr, bdaddr);
6335 rp.addr.type = bdaddr_type;
6336
6337 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6338 mgmt_status(status), &rp, sizeof(rp));
6339
6340 mgmt_pending_remove(cmd);
6341 }
6342
/* An outgoing connection attempt failed: possibly resume a deferred
 * power off and emit the Connect Failed event with the mapped status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6368
6369 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6370 {
6371 struct mgmt_ev_pin_code_request ev;
6372
6373 bacpy(&ev.addr.bdaddr, bdaddr);
6374 ev.addr.type = BDADDR_BREDR;
6375 ev.secure = secure;
6376
6377 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6378 }
6379
6380 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6381 u8 status)
6382 {
6383 struct pending_cmd *cmd;
6384 struct mgmt_rp_pin_code_reply rp;
6385
6386 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6387 if (!cmd)
6388 return;
6389
6390 bacpy(&rp.addr.bdaddr, bdaddr);
6391 rp.addr.type = BDADDR_BREDR;
6392
6393 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6394 mgmt_status(status), &rp, sizeof(rp));
6395
6396 mgmt_pending_remove(cmd);
6397 }
6398
6399 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6400 u8 status)
6401 {
6402 struct pending_cmd *cmd;
6403 struct mgmt_rp_pin_code_reply rp;
6404
6405 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6406 if (!cmd)
6407 return;
6408
6409 bacpy(&rp.addr.bdaddr, bdaddr);
6410 rp.addr.type = BDADDR_BREDR;
6411
6412 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6413 mgmt_status(status), &rp, sizeof(rp));
6414
6415 mgmt_pending_remove(cmd);
6416 }
6417
6418 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6419 u8 link_type, u8 addr_type, u32 value,
6420 u8 confirm_hint)
6421 {
6422 struct mgmt_ev_user_confirm_request ev;
6423
6424 BT_DBG("%s", hdev->name);
6425
6426 bacpy(&ev.addr.bdaddr, bdaddr);
6427 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6428 ev.confirm_hint = confirm_hint;
6429 ev.value = cpu_to_le32(value);
6430
6431 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6432 NULL);
6433 }
6434
6435 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6436 u8 link_type, u8 addr_type)
6437 {
6438 struct mgmt_ev_user_passkey_request ev;
6439
6440 BT_DBG("%s", hdev->name);
6441
6442 bacpy(&ev.addr.bdaddr, bdaddr);
6443 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6444
6445 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6446 NULL);
6447 }
6448
6449 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6450 u8 link_type, u8 addr_type, u8 status,
6451 u8 opcode)
6452 {
6453 struct pending_cmd *cmd;
6454 struct mgmt_rp_user_confirm_reply rp;
6455 int err;
6456
6457 cmd = mgmt_pending_find(opcode, hdev);
6458 if (!cmd)
6459 return -ENOENT;
6460
6461 bacpy(&rp.addr.bdaddr, bdaddr);
6462 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6463 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6464 &rp, sizeof(rp));
6465
6466 mgmt_pending_remove(cmd);
6467
6468 return err;
6469 }
6470
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6477
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6485
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6492
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6500
6501 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6502 u8 link_type, u8 addr_type, u32 passkey,
6503 u8 entered)
6504 {
6505 struct mgmt_ev_passkey_notify ev;
6506
6507 BT_DBG("%s", hdev->name);
6508
6509 bacpy(&ev.addr.bdaddr, bdaddr);
6510 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6511 ev.passkey = __cpu_to_le32(passkey);
6512 ev.entered = entered;
6513
6514 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6515 }
6516
6517 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6518 u8 addr_type, u8 status)
6519 {
6520 struct mgmt_ev_auth_failed ev;
6521
6522 bacpy(&ev.addr.bdaddr, bdaddr);
6523 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6524 ev.status = mgmt_status(status);
6525
6526 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6527 }
6528
/* Completion handler for HCI_OP_WRITE_AUTH_ENABLE: mirror the result
 * into the HCI_LINK_SECURITY flag, answer pending Set Link Security
 * commands and emit New Settings when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_set/test_and_clear tell whether the flag really
	 * flipped; only then is a New Settings event warranted.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6557
/* Queue a Write EIR command that blanks the controller's extended
 * inquiry response data; no-op if the controller lacks EIR support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	/* Keep the cached copy in sync with what gets written */
	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
6572
/* Completion handler for HCI_OP_WRITE_SSP_MODE: sync the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags with the result, answer pending
 * Set SSP commands and refresh or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable reverts a previously set SSP flag, so
		 * the reverted settings need announcing.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also takes high speed down with it */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6625
/* Completion handler for enabling/disabling Secure Connections: sync
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags with the result, answer pending
 * Set Secure Connections commands and emit New Settings on change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable reverts a previously set SC flag */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot outlive SC itself */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6662
6663 static void sk_lookup(struct pending_cmd *cmd, void *data)
6664 {
6665 struct cmd_lookup *match = data;
6666
6667 if (match->sk == NULL) {
6668 match->sk = cmd->sk;
6669 sock_hold(match->sk);
6670 }
6671 }
6672
/* Completion handler for a class of device update: locate the socket
 * of whichever command triggered the change and, on success, emit the
 * Class Of Device Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6689
/* Called when a local name update completed in the controller. Emits
 * Local Name Changed unless the update is part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command, so the change originated in
		 * the kernel; cache the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6716
/* Complete a pending Read Local OOB Data command. When Secure
 * Connections is enabled and 256-bit values are available, the
 * extended response (both 192- and 256-bit hash/randomizer pairs) is
 * returned; otherwise only the 192-bit pair is sent.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6763
/* Send a Device Found event for a discovered remote device. EIR data,
 * the class of device (if not already present in the EIR) and any scan
 * response data are packed back-to-back after the fixed event header.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only when the EIR itself does not
	 * already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data follows the EIR data and is accounted for
	 * in the combined eir_len field of the event.
	 */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6811
6812 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6813 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6814 {
6815 struct mgmt_ev_device_found *ev;
6816 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6817 u16 eir_len;
6818
6819 ev = (struct mgmt_ev_device_found *) buf;
6820
6821 memset(buf, 0, sizeof(buf));
6822
6823 bacpy(&ev->addr.bdaddr, bdaddr);
6824 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6825 ev->rssi = rssi;
6826
6827 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6828 name_len);
6829
6830 ev->eir_len = cpu_to_le16(eir_len);
6831
6832 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6833 }
6834
6835 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6836 {
6837 struct mgmt_ev_discovering ev;
6838 struct pending_cmd *cmd;
6839
6840 BT_DBG("%s discovering %u", hdev->name, discovering);
6841
6842 if (discovering)
6843 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6844 else
6845 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6846
6847 if (cmd != NULL) {
6848 u8 type = hdev->discovery.type;
6849
6850 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6851 sizeof(type));
6852 mgmt_pending_remove(cmd);
6853 }
6854
6855 memset(&ev, 0, sizeof(ev));
6856 ev.type = hdev->discovery.type;
6857 ev.discovering = discovering;
6858
6859 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6860 }
6861
/* Request completion callback for mgmt_reenable_advertising(); only
 * logs the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6866
/* Turn LE advertising back on, but only when the mgmt advertising
 * setting is still enabled.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
This page took 0.277964 seconds and 5 git commands to generate.