Bluetooth: Add support for limited privacy mode
net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
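
/* Usage sketch (illustrative only, not part of the original file): a typical
 * caller batches one or more HCI commands into a request and runs them with
 * a single completion callback. The function names below are assumptions
 * made for the example.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	static int example_run(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 enable = 0x00;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
 *			    &enable);
 *		return hci_req_run(&req, example_complete);
 *	}
 */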

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
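
/* Usage sketch (illustrative; the opcode and timeout choices are assumptions
 * for the example): __hci_cmd_sync() sends a single command and blocks until
 * its completion event arrives, returning the event parameters as an skb.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now holds the command's return parameters)
 *	kfree_skb(skb);
 */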

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
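
/* Usage sketch (illustrative): hci_req_sync() takes a request-builder
 * callback; the callback only queues commands, and the core runs them
 * serialized against other synchronous requests. The callback name and
 * mask value below are assumptions for the example.
 *
 *	static int set_event_mask(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 mask[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 *
 *		hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(mask), mask);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err;
 *
 *	err = hci_req_sync(hdev, set_event_mask, 0, HCI_CMD_TIMEOUT, &status);
 */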

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}
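
/* Usage sketch (illustrative): hci_req_add_ev() is for commands whose
 * completion is signalled by an event other than Command Complete. Inquiry,
 * for example, terminates with Inquiry Complete; the values below are
 * assumptions for the example.
 *
 *	struct hci_cp_inquiry cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	hci_req_add_ev(req, HCI_OP_INQUIRY, sizeof(cp), &cp,
 *		       HCI_EV_INQUIRY_COMPLETE);
 */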

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
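
/* Worked example for the values above (page scan activity is expressed in
 * 0.625 ms baseband slots per the Bluetooth Core Specification):
 *
 *	interval 0x0100 * 0.625 ms = 160 ms	(fast connectable)
 *	interval 0x0800 * 0.625 ms = 1280 ms	(default 1.28 s)
 *	window   0x0012 * 0.625 ms = 11.25 ms
 */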

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}
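
/* Worked example (hypothetical UUIDs): with two 16-bit service UUIDs
 * 0x110e and 0x111f registered, create_uuid16_list() emits the EIR
 * structure
 *
 *	05 03 0e 11 1f 11
 *
 * where 05 is the length byte (type byte plus four UUID bytes), 03 is
 * EIR_UUID16_ALL, and each UUID is encoded little-endian.
 */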

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}
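
/* Summary of the return value above (scan filter policy): 0x00 accepts all
 * advertising because the white list cannot be used, either because an
 * entry resolves to an RPA or because the controller's white list is too
 * small; for example, a controller with le_white_list_size of 8 and nine
 * pending connections falls back to 0x00. 0x01 restricts scanning to
 * white-listed devices.
 */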

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses
         * (i.e. LE privacy is enabled), controllers that support the
         * Extended Scanner Filter Policies feature can also handle
         * directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}
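
/* Decision table for the checks above (derived from the code, not part of
 * the original file):
 *
 *	HCI_PRIVACY  HCI_LIMITED_PRIVACY  discoverable && bondable  RPA?
 *	clear        -                    -                         no
 *	set          clear                -                         yes
 *	set          set                  yes                       no
 *	set          set                  no                        yes
 */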

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0;
        size_t name_len;

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Set the appropriate entries based on advertising instance flags
         * here once flags other than 0 are supported.
         */
        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        return adv_instance->scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                        flags |= LE_AD_NO_BREDR;

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}
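
/* Worked example for the timeout selection above (hypothetical values): an
 * instance with a non-zero timeout, duration 2 s and remaining_time 5 s
 * advertises for min(2, 5) = 2 s and its remaining_time drops to 3 s. With
 * remaining_time 1 s it would instead advertise for 1 s, after which the
 * instance expires.
 */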

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
                                u8 instance, bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
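
/* Note on the NRPA masking above: bdaddr_t stores the address little-endian,
 * so b[5] is the most significant byte, and masking it with 0x3f clears the
 * two top bits that mark a random address as non-resolvable. A hypothetical
 * example: random bytes giving c4:17:22:9a:51:33 become 04:17:22:9a:51:33
 * after the mask.
 */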

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        BT_DBG("%s", hdev->name);

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
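
/* Worked example (hypothetical classes): for a phone, major_class 0x02 and
 * minor_class 0x0c give cod = { 0x0c, 0x02, <service bits> }. Setting
 * HCI_LIMITED_DISCOVERABLE ORs 0x20 into cod[1], which is the "Limited
 * Discoverable Mode" service class bit of the Class of Device field.
 */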

static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;	/* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;	/* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;	/* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                __hci_req_update_adv_data(req, 0x00);

                /* Discoverable mode affects the local advertising
                 * address in limited privacy mode.
                 */
                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                        __hci_req_enable_advertising(req);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}
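
/* Usage sketch (illustrative): callers pass a spec-defined reason code, for
 * example when tearing down a connection on user request:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * -ENODATA from hci_req_run() is treated as success above because
 * __hci_abort_conn() may legitimately queue no commands, e.g. for an LE
 * connection that is still in the scanning stage.
 */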

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE only scan, change the discovery state.
         * If we were running both LE and BR/EDR inquiry simultaneously,
         * and BR/EDR inquiry is already finished, stop discovery,
         * otherwise BR/EDR inquiry will stop discovery when finished.
         * If we are about to resolve a remote device name, do not change
         * the discovery state.
         */
        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable has been queued
         * after duration from scan_start. During scan restart this job
         * has been canceled, and we need to queue it again after proper
         * timeout, to make sure that scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}
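
/* Worked example for the elapsed-time calculation above (hypothetical
 * jiffies values): with scan_start = ULONG_MAX - 10 and now = 20 the
 * counter has wrapped, so elapsed = ULONG_MAX - scan_start + now
 * = 10 + 20 = 30 ticks, and the disable work is requeued for the
 * remaining duration - 30 ticks.
 */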
1938
e68f072b
JH
1939static void disable_advertising(struct hci_request *req)
1940{
1941 u8 enable = 0x00;
1942
1943 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1944}
1945
static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If the controller is scanning, background scanning is running.
         * Thus, we should temporarily stop it in order to set the
         * discovery scanning parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable private
         * address (when the privacy feature has been enabled) or a
         * non-resolvable private address.
         */
        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
                                        &own_addr_type);
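        /* If no suitable random address could be programmed, fall back
         * to scanning with the public address instead of failing.
         */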
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}

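/* On controllers that can run BR/EDR inquiry and LE scanning at the
 * same time, start both from a single request.
 */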
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * For interleaved discovery, by comparison, BR/EDR inquiry
                 * and LE scanning are done sequentially with separate
                 * timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double the LE
                         * scan interval. We must leave some time for the
                         * controller to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
            hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

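/* Queue the commands needed to stop an ongoing discovery on @req:
 * inquiry cancel, LE scan disable and, for BR/EDR name resolution, a
 * remote name request cancel. Returns true if any command was queued.
 */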
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

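/* hci_req_sync() callback that wraps hci_req_stop_discovery() with the
 * device lock held.
 */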
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

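/* Worker that drives the discovery state machine: start or stop
 * discovery depending on the current state and report the result back
 * to the management interface.
 */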
static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

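/* Delayed work that runs when the discoverable timeout expires. */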
static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, make sure the limited
         * discoverable flag is cleared. Even in the case of a timeout
         * triggered from general discoverable, it is safe to
         * unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

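/* Bring the controller configuration in line with the host flags
 * (SSP, LE host support, advertising, scan, class, name and EIR) when
 * the device has been powered on.
 */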
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right host state
                 * (host features set).
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        __hci_req_update_adv_data(req, 0x00);
                        __hci_req_update_scan_rsp_data(req, 0x00);

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                                __hci_req_enable_advertising(req);
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

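/* Wire up the request-related work items for a newly allocated
 * hci_dev.
 */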
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

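/* Cancel the pending synchronous request, if any, and cancel all
 * request-related work items, waiting for them to finish.
 */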
void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}