/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses
	 * because LE privacy is enabled, controllers that support
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

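/* Check whether any whitelisted BR/EDR device lacks a fully
 * established ACL connection; if so, page scan must remain enabled
 * so that such a device can reconnect.
 */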
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued
	 * to run after 'duration' from scan_start. That job was canceled
	 * during the scan restart, so queue it again with the remaining
	 * timeout to make sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);
}