GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/hci_sync.c
// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
				   const void *param, struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if the command needs to be associated with a sock
	 * (e.g. the mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_req_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_request_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* If the command returns a status event, skb will be NULL as there
	 * are no parameters.
	 */
	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
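
/* Usage sketch (illustrative only, not part of the original file): a typical
 * caller sends a command and consumes the response skb. The opcode shown
 * here is just an assumed example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data ...
 *
 *	kfree_skb(skb);
 */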

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);

	/* If the command returns a status event, skb will be set to
	 * ERR_PTR(-ENODATA).
	 */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	/* If we were running an LE only scan, change the discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}
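
/* Illustrative calls (assumed, not from the original file) matching the
 * force semantics described above:
 *
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
 *	  - remove every instance unconditionally
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x00, false);
 *	  - remove only instances that have a timeout set
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x01, false);
 *	  - deactivate instance 0x01 but keep it stored unless its
 *	    remaining lifetime has reached zero
 */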

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static int hci_passive_scan_sync(struct hci_dev *hdev);

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_passive_scan_sync(hdev);

	hci_dev_lock(hdev);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		bt_dev_err(hdev, "unexpected error");
	}

	hci_dev_unlock(hdev);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
				       struct hci_cmd_sync_work_entry *entry,
				       int err)
{
	if (entry->destroy)
		entry->destroy(hdev, entry->data, err);

	list_del(&entry->list);
	kfree(entry);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);
	cancel_work_sync(&hdev->reenable_adv_work);

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wakeup command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		/* req_result is __u32 so error must be positive to be properly
		 * propagated.
		 */
		hdev->req_result = err < 0 ? -err : err;
		hdev->req_status = HCI_REQ_CANCELED;

		wake_up_interruptible(&hdev->req_wait_q);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
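
/* Editorial note on the two cancel variants above: hci_cmd_sync_cancel()
 * defers the timer cleanup and wake-up to cmd_sync_cancel_work on
 * hdev->workqueue, while hci_cmd_sync_cancel_sync() wakes the waiter in
 * __hci_cmd_sync_sk() directly from the calling context.
 */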

/* Submit HCI command to be run in cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue the command if hdev is running, which means it has been
	 * opened and is either in the init phase or already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
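
/* Usage sketch (illustrative only): queueing deferred work that then runs
 * with hdev->req_lock held. set_name_sync and set_name_destroy below are
 * assumed example callbacks, not part of this file.
 *
 *	static int set_name_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
 *					     HCI_MAX_NAME_LENGTH, data,
 *					     HCI_CMD_TIMEOUT);
 *	}
 *
 *	static void set_name_destroy(struct hci_dev *hdev, void *data, int err)
 *	{
 *		kfree(data);
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, set_name_sync, name, set_name_destroy);
 */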

static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			   void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (func && entry->func != func)
			continue;

		if (data && entry->data != data)
			continue;

		if (destroy && entry->destroy != destroy)
			continue;

		return entry;
	}

	return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create
 *   a new entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			    void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Run HCI command:
 *
 * - hdev must be running
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		     void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue the command if hdev is running, which means it has been
	 * opened and is either in the init phase or already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	/* If on cmd_sync_work then run immediately otherwise queue */
	if (current_work() == &hdev->cmd_sync_work)
		return func(hdev, data);

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run);

/* Run HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create
 *   a new entry and run it.
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_run(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run_once);

/* Lookup HCI command entry:
 *
 * - Return the first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	mutex_lock(&hdev->cmd_sync_work_lock);
	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
			       struct hci_cmd_sync_work_entry *entry)
{
	mutex_lock(&hdev->cmd_sync_work_lock);
	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Look up and cancel the first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
			       hci_cmd_sync_work_func_t func,
			       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	if (!entry)
		return false;

	hci_cmd_sync_cancel_entry(hdev, entry);

	return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);

/* Dequeue HCI command entry:
 *
 * - Look up and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	bool ret = false;

	mutex_lock(&hdev->cmd_sync_work_lock);
	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
						   destroy))) {
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
		ret = true;
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);
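
/* Matching note: in the lookup/dequeue helpers above a NULL func, data or
 * destroy argument acts as a wildcard (see _hci_cmd_sync_lookup_entry). For
 * example, hci_cmd_sync_dequeue(hdev, scan_disable_sync, NULL, NULL) cancels
 * every queued scan_disable_sync entry regardless of its data pointer.
 */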

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If a random address has already been set and we're advertising or
	 * initiating an LE connection, we can't go ahead and change the
	 * random address at this time. This is because the eventual initiator
	 * address used for the subsequently created connection will be
	 * undefined (some controllers use the new address and others the one
	 * we had when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	     hci_lookup_le_connect(hdev))) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If the controller supports LL Privacy, use own address type
		 * 0x03.
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;
	struct adv_info *adv = NULL;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = adv ? adv->handle : instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info; instead it uses
		 * hdev->random_addr to track its address, so whenever it needs
		 * to be updated this also sets the random address, since
		 * hdev->random_addr is shared with the scan state machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int
hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
			    const struct hci_cp_le_set_ext_adv_params *cp,
			    struct hci_rp_le_set_ext_adv_params *rp)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
			     cp, HCI_CMD_TIMEOUT);

	/* If the command returns a status event, skb will be set to
	 * ERR_PTR(-ENODATA).
	 */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
			   HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*rp)) {
		bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
			   HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
		kfree_skb(skb);
		return -EIO;
	}

	memcpy(rp, skb->data, sizeof(*rp));
	kfree_skb(skb);

	if (!rp->status) {
		hdev->adv_addr_type = cp->own_addr_type;
		if (!cp->handle) {
			/* Store in hdev for instance 0 */
			hdev->adv_tx_power = rp->tx_power;
		} else if (adv) {
			adv->tx_power = rp->tx_power;
		}
	}

	return rp->status;
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu->data,
				  HCI_MAX_EXT_AD_LENGTH);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeds */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu->data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_rp_le_set_ext_adv_params rp;
	bool connectable, require_privacy;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used and it is not periodic.
	 * In that case it is fine to use a non-resolvable private address.
	 */
	require_privacy = !connectable && !(adv && adv->periodic);

	err = hci_get_random_address(hdev, require_privacy,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
		cp.sid = adv->sid;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		cp.sid = 0x00;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer’s Identity Address and the Peer_Address_Type
	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate their own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = adv ? adv->handle : instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
	if (err)
		return err;

	/* Update adv data as tx power is known now */
	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if the random address needs to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu->data);

	pdu->handle = adv ? adv->handle : instance;
	pdu->length = len;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu->data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = adv ? adv->handle : instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}

int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
		    HCI_MAX_PER_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu->data);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     struct_size(pdu, data, len), pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if the periodic advertising data contains a Basic Announcement and,
 * if it does, generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[HCI_MAX_EXT_AD_LENGTH];
	u8 len;

	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
	 * advertising, so it cannot be used for the likes of Broadcast
	 * Announcement since it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there is
	 * nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if the advertising data already has a Broadcast Announcement,
	 * since the process may want to control the Broadcast ID directly and
	 * in that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	memcpy(ad + len, adv->adv_data, adv->adv_data_len);
	hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len,
				  ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid,
			   u8 data_len, u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (adv) {
			if (sid != HCI_SID_INVALID && adv->sid != sid) {
				/* If the SIDs don't match, attempt to find
				 * the instance by SID.
				 */
				adv = hci_find_adv_sid(hdev, sid);
				if (!adv) {
					bt_dev_err(hdev,
						   "Unable to find adv_info");
					return -EINVAL;
				}
			}

			/* Turn it into periodic advertising */
			adv->periodic = true;
			adv->per_adv_data_len = data_len;
			if (data)
				memcpy(adv->per_adv_data, data, data_len);
			adv->flags = flags;
		} else if (!adv) {
			/* Create an instance if one could not be found */
			adv = hci_add_per_instance(hdev, instance, sid, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_annoucement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
	struct hci_cp_le_term_big cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
1947
1948
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1949
bool force)
1950
{
1951
struct adv_info *adv = NULL;
1952
u16 timeout;
1953
1954
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1955
return -EPERM;
1956
1957
if (hdev->adv_instance_timeout)
1958
return -EBUSY;
1959
1960
adv = hci_find_adv_instance(hdev, instance);
1961
if (!adv)
1962
return -ENOENT;
1963
1964
/* A zero timeout means unlimited advertising. As long as there is
1965
* only one instance, duration should be ignored. We still set a timeout
1966
* in case further instances are being added later on.
1967
*
1968
* If the remaining lifetime of the instance is more than the duration
1969
* then the timeout corresponds to the duration, otherwise it will be
1970
* reduced to the remaining instance lifetime.
1971
*/
1972
if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1973
timeout = adv->duration;
1974
else
1975
timeout = adv->remaining_time;
1976
1977
/* The remaining time is being reduced unless the instance is being
1978
* advertised without time limit.
1979
*/
1980
if (adv->timeout)
1981
adv->remaining_time = adv->remaining_time - timeout;
1982
1983
/* Only use work for scheduling instances with legacy advertising */
1984
if (!ext_adv_capable(hdev)) {
1985
hdev->adv_instance_timeout = timeout;
1986
queue_delayed_work(hdev->req_workqueue,
1987
&hdev->adv_instance_expire,
1988
secs_to_jiffies(timeout));
1989
}
1990
1991
/* If we're just re-scheduling the same instance again then do not
1992
* execute any HCI commands. This happens when a single instance is
1993
* being advertised.
1994
*/
1995
if (!force && hdev->cur_adv_instance == instance &&
1996
hci_dev_test_flag(hdev, HCI_LE_ADV))
1997
return 0;
1998
1999
hdev->cur_adv_instance = instance;
2000
2001
return hci_start_adv_sync(hdev, instance);
2002
}
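
/* Worked example of the timeout selection above: with a duration of 2s
 * and 5s of remaining lifetime, the instance is scheduled for 2s and
 * remaining_time drops to 3s; once only 1s remains, that final second
 * is used as the timeout instead.
 */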

static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	/* Disable instance 0x00 to disable all instances */
	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (err)
		return err;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
	struct adv_info *adv, *n;

	if (ext_adv_capable(hdev))
		/* Remove all existing sets */
		return hci_clear_adv_sets_sync(hdev, sk);

	/* This is safe as long as no command is sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	/* Cleanup non-ext instances */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;
		int err;

		if (!(force || adv->timeout))
			continue;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
			       struct sock *sk)
{
	int err;

	/* If we use extended advertising, instance has to be removed first. */
	if (ext_adv_capable(hdev))
		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);

	/* This is safe as long as no command is sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, instance);
	if (!err)
		mgmt_advertising_removed(sk, hdev, instance);

	hci_dev_unlock(hdev);

	return err;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *next = NULL;
	int err;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (hdev->cur_adv_instance == instance)
		next = hci_get_next_instance(hdev, instance);

	if (!instance) {
		err = hci_clear_adv_sync(hdev, sk, force);
		if (err)
			return err;
	} else {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (force || (adv && adv->timeout && !adv->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next && next->instance == instance)
				next = NULL;

			err = hci_remove_adv_sync(hdev, instance, sk);
			if (err)
				return err;
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next && !ext_adv_capable(hdev))
		hci_schedule_adv_instance_sync(hdev, next->instance, false);

	return 0;
}

int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_disable_ext_adv_instance_sync(hdev, 0x00);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (val && hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!ll_privacy_capable(hdev))
		return 0;

	/* If the controller is already in the requested resolving state we
	 * are done.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if the interleave scan was started by this function,
 * false otherwise.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* Removes a connection from the resolving list if needed. */
static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_resolv_list cp;
	struct bdaddr_list_with_irk *entry;

	if (!ll_privacy_capable(hdev))
		return 0;

	/* Check if the IRK has been programmed */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
						bdaddr_type);
	if (!entry)
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
				       bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;
	int err;

	/* Check if device is on accept list before removing it */
	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	/* Ignore errors when removing from the resolving list, as it is
	 * likely that the device was never added.
	 */
	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
		return err;
	}

	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

struct conn_params {
	bdaddr_t addr;
	u8 addr_type;
	hci_conn_flags_t flags;
	u8 privacy_mode;
};

/* Adds a connection to the resolving list if needed.
 * Setting params to NULL programs the local hdev->irk.
 */
static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_add_to_resolv_list cp;
	struct smp_irk *irk;
	struct bdaddr_list_with_irk *entry;
	struct hci_conn_params *p;

	if (!ll_privacy_capable(hdev))
		return 0;

	/* Attempt to program local identity address, type and irk if params is
	 * NULL.
	 */
	if (!params) {
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return 0;

		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
		memcpy(cp.peer_irk, hdev->irk, 16);
		goto done;
	} else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	/* Check if the IRK has _not_ been programmed yet. */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
						&params->addr,
						params->addr_type);
	if (entry)
		return 0;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);
	memcpy(cp.peer_irk, irk->val, 16);

	/* Default privacy mode is always Network */
	params->privacy_mode = HCI_NETWORK_PRIVACY;

	rcu_read_lock();
	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
				      &params->addr, params->addr_type);
	if (!p)
		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &params->addr, params->addr_type);
	if (p)
		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
	rcu_read_unlock();

done:
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		memcpy(cp.local_irk, hdev->irk, 16);
	else
		memset(cp.local_irk, 0, 16);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Device Privacy Mode. */
static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_set_privacy_mode cp;
	struct smp_irk *irk;

	if (!ll_privacy_capable(hdev) ||
	    !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
		return 0;

	/* If device privacy mode has already been set there is nothing to do */
	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
		return 0;

	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
	 * indicates that LL Privacy has been enabled and
	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
	 */
	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.bdaddr_type = irk->addr_type;
	bacpy(&cp.bdaddr, &irk->bdaddr);
	cp.mode = HCI_DEVICE_PRIVACY;

	/* Note: params->privacy_mode is not updated since it is a copy */

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Adds a connection to the allow list if needed. If the device uses an RPA
 * (has an IRK), this also attempts to program the device into the resolving
 * list and properly set its privacy mode.
 */
static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
				       struct conn_params *params,
				       u8 *num_entries)
{
	struct hci_cp_le_add_to_accept_list cp;
	int err;

	/* During suspend, only wakeable devices can be in acceptlist */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
		hci_le_del_accept_list_sync(hdev, &params->addr,
					    params->addr_type);
		return 0;
	}

	/* If the accept list is full, have the caller select a filter policy
	 * that accepts all advertising instead.
	 */
	if (*num_entries >= hdev->le_accept_list_size)
		return -ENOSPC;

	/* Attempt to program the device into the resolving list first, to
	 * avoid having to roll back in case it fails, since the resolving
	 * list is dynamic and can probably be smaller than the accept list.
	 */
	err = hci_le_add_resolve_list_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
		return err;
	}

	/* Set Privacy Mode */
	err = hci_le_set_privacy_mode_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
		return err;
	}

	/* Check if already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
		/* Rollback the device from the resolving list */
		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
		return err;
	}

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

/* This function disables/pauses all advertising instances */
static int hci_pause_advertising_sync(struct hci_dev *hdev)
{
	int err;
	int old_state;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	/* If advertising has already been paused there is nothing to do. */
	if (hdev->advertising_paused)
		return 0;

	bt_dev_dbg(hdev, "Pausing directed advertising");

	/* Stop directed advertising */
	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
	if (old_state) {
		/* When discoverable timeout triggers, then just make sure
		 * the limited discoverable flag is cleared. Even in the case
		 * of a timeout triggered from general discoverable, it is
		 * safe to unconditionally clear the flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hdev->discov_timeout = 0;
	}

	bt_dev_dbg(hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	err = hci_disable_advertising_sync(hdev);
	if (err)
		return err;

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(hdev))
		cancel_adv_timeout(hdev);

	hdev->advertising_paused = true;
	hdev->advertising_old_state = old_state;

	return 0;
}

/* This function enables all user advertising instances */
static int hci_resume_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	/* If advertising has not been paused there is nothing to do. */
	if (!hdev->advertising_paused)
		return 0;

	/* Resume directed advertising */
	hdev->advertising_paused = false;
	if (hdev->advertising_old_state) {
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
		hdev->advertising_old_state = 0;
	}

	bt_dev_dbg(hdev, "Resuming advertising instances");

	if (ext_adv_capable(hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
			err = hci_enable_ext_advertising_sync(hdev,
							      adv->instance);
			if (!err)
				continue;

			/* If the instance cannot be resumed remove it */
			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
							 NULL);
		}

		/* If current advertising instance is set to instance 0x00
		 * then we need to re-enable it.
		 */
		if (!hdev->cur_adv_instance)
			err = hci_enable_ext_advertising_sync(hdev,
							      hdev->cur_adv_instance);
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		err = hci_schedule_adv_instance_sync(hdev,
						     hdev->cur_adv_instance,
						     true);
	}

	hdev->advertising_paused = false;

	return err;
}

static int hci_pause_addr_resolution(struct hci_dev *hdev)
{
	int err;

	if (!ll_privacy_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	/* Cannot disable addr resolution if scanning is enabled or
	 * when initiating an LE connection.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
		return -EPERM;
	}

	/* Cannot disable addr resolution if advertising is enabled. */
	err = hci_pause_advertising_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Pause advertising failed: %d", err);
		return err;
	}

	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err)
		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
			   err);

	/* If address resolution was disabled and scanning uses an RPA, leave
	 * advertising paused and return.
	 */
	if (!err && scan_use_rpa(hdev))
		return 0;

	hci_resume_advertising_sync(hdev);
	return err;
}

struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
					     bool extended, struct sock *sk)
{
	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
		HCI_OP_READ_LOCAL_OOB_DATA;

	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
{
	struct hci_conn_params *params;
	struct conn_params *p;
	size_t i;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action)
		++i;
	*n = i;

	rcu_read_unlock();

	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
	if (!p)
		return NULL;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action) {
		/* Racing adds are handled in next scan update */
		if (i >= *n)
			break;

		/* No hdev->lock, but: addr, addr_type are immutable.
		 * privacy_mode is only written by us or in
		 * hci_cc_le_set_privacy_mode that we wait for.
		 * We should be idempotent so MGMT updating flags
		 * while we are processing is OK.
		 */
		bacpy(&p[i].addr, &params->addr);
		p[i].addr_type = params->addr_type;
		p[i].flags = READ_ONCE(params->flags);
		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
		++i;
	}

	rcu_read_unlock();

	*n = i;
	return p;
}
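
/* Note the two-pass pattern above: the list is counted under RCU, the
 * array is allocated outside the read-side section (kvcalloc with
 * GFP_KERNEL may sleep), and the copy is bounded by the first count, so
 * entries added in between are simply picked up by the next update.
 */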

/* Clear LE Accept List */
static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x80))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Device must not be scanning when updating the accept list.
 *
 * Update is done using the following sequence:
 *
 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
 * Remove Devices From Accept List ->
 * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) ->
 * Add Devices to Accept List ->
 * (has IRK && ll_privacy_capable(Add Devices to Resolving List)) ->
 * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * In case of failure advertising shall be restored to its original state and
 * the return value shall disable use of the accept list, since either the
 * accept list or the resolving list could not be programmed.
 */
static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
{
	struct conn_params *params;
	struct bdaddr_list *b, *t;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	u8 filter_policy;
	size_t i, n;
	int err;

	/* Pause advertising if resolving list can be used as controllers
	 * cannot accept resolving list modifications while advertising.
	 */
	if (ll_privacy_capable(hdev)) {
		err = hci_pause_advertising_sync(hdev);
		if (err) {
			bt_dev_err(hdev, "pause advertising failed: %d", err);
			return 0x00;
		}
	}

	/* Disable address resolution while reprogramming accept list since
	 * devices that do have an IRK will be programmed in the resolving list
	 * when LL Privacy is enabled.
	 */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
		goto done;
	}

	/* Force address filtering if PA Sync is in progress */
	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_create_pa_sync(hdev);
		if (conn) {
			struct conn_params pa;

			memset(&pa, 0, sizeof(pa));

			bacpy(&pa.addr, &conn->dst);
			pa.addr_type = conn->dst_type;

			/* Clear first since there could be addresses left
			 * behind.
			 */
			hci_le_clear_accept_list_sync(hdev);

			num_entries = 1;
			err = hci_le_add_accept_list_sync(hdev, &pa,
							  &num_entries);
			goto done;
		}
	}

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is connected or is
	 * still in the list of pending connections or list of devices to
	 * report. If not present in either list, then remove it from
	 * the controller.
	 */
	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
			continue;

		/* Pointers not dereferenced, no locks needed */
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the acceptlist.
		 */
		if (!pend_conn && !pend_report) {
			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
						    b->bdaddr_type);
			continue;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * accept list.
	 *
	 * The list and params may be mutated while we wait for events,
	 * so make a copy and iterate it.
	 */

	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */

	params = conn_params_copy(&hdev->pend_le_reports, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		err = -EINVAL;

done:
	filter_policy = err ? 0x00 : 0x01;

	/* Enable address resolution when LL Privacy is enabled. */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	if (err)
		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);

	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* Select filter policy to use accept list */
	return filter_policy;
}
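
/* The value returned above feeds directly into the scan parameters:
 * 0x00 means the accept list could not be fully programmed, so the
 * controller should accept all advertisements and let the host filter,
 * while 0x01 enables accept-list filtering in the controller.
 */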

static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
				   u8 type, u16 interval, u16 window)
{
	cp->type = type;
	cp->interval = cpu_to_le16(interval);
	cp->window = cpu_to_le16(window);
}

static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
					  u16 interval, u16 window,
					  u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_cp_le_scan_phy_params *phy;
	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
	u8 num_phy = 0x00;

	cp = (void *)data;
	phy = (void *)cp->data;

	memset(data, 0, sizeof(data));

	cp->own_addr_type = own_addr_type;
	cp->filter_policy = filter_policy;

	/* If PA Sync is in progress, select the PHY based on the
	 * hci_conn.iso_qos.
	 */
	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		struct hci_cp_le_add_to_accept_list *sent;

		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
		if (sent) {
			struct hci_conn *conn;

			conn = hci_conn_hash_lookup_ba(hdev, PA_LINK,
						       &sent->bdaddr);
			if (conn) {
				struct bt_iso_qos *qos = &conn->iso_qos;

				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
					cp->scanning_phys |= LE_SCAN_PHY_1M;
					hci_le_scan_phy_params(phy, type,
							       interval,
							       window);
					num_phy++;
					phy++;
				}

				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
					cp->scanning_phys |= LE_SCAN_PHY_CODED;
					hci_le_scan_phy_params(phy, type,
							       interval * 3,
							       window * 3);
					num_phy++;
					phy++;
				}

				if (num_phy)
					goto done;
			}
		}
	}

	if (scan_1m(hdev) || scan_2m(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_1M;
		hci_le_scan_phy_params(phy, type, interval, window);
		num_phy++;
		phy++;
	}

	if (scan_coded(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_CODED;
		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
		num_phy++;
		phy++;
	}

done:
	if (!num_phy)
		return -EINVAL;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
				     sizeof(*cp) + sizeof(*phy) * num_phy,
				     data, HCI_CMD_TIMEOUT);
}
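
/* The LE Coded PHY above is programmed with the interval and window
 * scaled by 3 to compensate for its lower data rate: e.g. an interval
 * of 0x0060 (96 slots * 0.625 ms = 60 ms) on LE 1M becomes 0x0120
 * (288 slots = 180 ms) on LE Coded.
 */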

static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
				      u16 interval, u16 window,
				      u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_scan_param cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
						      window, own_addr_type,
						      filter_policy);

	memset(&cp, 0, sizeof(cp));
	cp.type = type;
	cp.interval = cpu_to_le16(interval);
	cp.window = cpu_to_le16(window);
	cp.own_address_type = own_addr_type;
	cp.filter_policy = filter_policy;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       u8 filter_dup)
{
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
					 own_addr_type, filter_policy);
	if (err)
		return err;

	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
}

static int hci_passive_scan_sync(struct hci_dev *hdev)
{
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "disable scanning failed: %d", err);
		return err;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
					   &own_addr_type))
		return 0;

	if (hdev->enable_advmon_interleave_scan &&
	    hci_update_interleaved_scan_sync(hdev))
		return 0;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = hci_update_accept_list_sync(hdev);

	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
	 * passive scanning cannot be started since that would require the host
	 * to be woken up to process the reports.
	 */
	if (hdev->suspended && !filter_policy) {
		/* If the accept list is empty there is no need to scan
		 * while suspended.
		 */
		if (list_empty(&hdev->le_accept_list))
			return 0;

		/* If there are devices in the accept list, some devices could
		 * not be programmed. In the non-suspended case this means
		 * filter_policy needs to be set to 0x00 so the host filters,
		 * but since we are handling the suspended case here we can
		 * ignore the devices needing host filtering, to allow the
		 * devices in the acceptlist to wake up the system.
		 */
		filter_policy = 0x01;
	}

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no acceptlist)
	 * and 0x01 (acceptlist enabled) use the new filter policies
	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	/* Disable all filtering for Mesh */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		filter_policy = 0;
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	}

	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);

	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
				   own_addr_type, filter_policy, filter_dups);
}
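
/* Scan timing above is chosen by priority: the suspend parameters come
 * first, then the connection-establishment parameters, then the ADV
 * monitor parameters, and finally the default le_scan_interval and
 * le_scan_window pair.
 */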

/* This function controls the passive scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it in the following sequence:
 *
 * If there are devices to scan:
 *
 * Disable Scanning -> Update Accept List ->
 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * Otherwise:
 *
 * Disable Scanning
 */
int hci_update_passive_scan_sync(struct hci_dev *hdev)
{
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return 0;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */

		bt_dev_dbg(hdev, "stopping background scanning");

		err = hci_scan_disable_sync(hdev);
		if (err)
			bt_dev_err(hdev, "stop background scanning failed: %d",
				   err);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return 0;

		bt_dev_dbg(hdev, "start background scanning");

		err = hci_passive_scan_sync(hdev);
		if (err)
			bt_dev_err(hdev, "start background scanning failed: %d",
				   err);
	}

	return err;
}

static int update_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_scan_sync(hdev);
}

int hci_update_scan(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
}

static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

int hci_update_passive_scan(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
				       NULL);
}

int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
{
	int err;

	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
		return 0;

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(val), &val, HCI_CMD_TIMEOUT);

	if (!err) {
		if (val) {
			hdev->features[1][0] |= LMP_HOST_SC;
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SC;
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		}
	}

	return err;
}

int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
{
	int err;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    lmp_host_ssp_capable(hdev))
		return 0;

	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return hci_write_sc_support_sync(hdev, 0x01);
}

int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
{
	struct hci_cp_write_le_host_supported cp;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    !lmp_bredr_capable(hdev))
		return 0;

	/* Check first if we already have the right host state
	 * (host features set)
	 */
	if (le == lmp_host_le_capable(hdev) &&
	    simul == lmp_host_le_br_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.le = le;
	cp.simul = simul;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_powered_update_adv_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If RPA Resolution has not been enabled yet it means the
	 * resolving list is empty and we should attempt to program the
	 * local IRK in order to support using own_addr_type
	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
	 */
	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		hci_le_add_resolve_list_sync(hdev, NULL);
		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	}

	/* Make sure the controller has a good default for
	 * advertising data. This also applies to the case
	 * where BR/EDR was toggled during the AUTO_OFF phase.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev)) {
			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			err = hci_update_adv_data_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hci_enable_advertising_sync(hdev);
	}

	/* Call for each tracked instance to be scheduled */
	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}

static int hci_write_auth_enable_sync(struct hci_dev *hdev)
{
	u8 link_sec;

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
				     sizeof(link_sec), &link_sec,
				     HCI_CMD_TIMEOUT);
}

int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
{
	struct hci_cp_write_page_scan_activity cp;
	u8 type;
	int err = 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		cp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	cp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
		if (err)
			return err;
	}

	if (hdev->page_scan_type != type)
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
					    sizeof(type), &type,
					    HCI_CMD_TIMEOUT);

	return err;
}
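
/* The fast-connectable interval above follows from the 0.625 ms
 * baseband slot unit: 0x0100 = 256 slots * 0.625 ms = 160 ms, which is
 * what the "160 msec page scan interval" comment refers to.
 */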

static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				     sizeof(val), &val,
				     HCI_CMD_TIMEOUT);
}

int hci_update_scan_sync(struct hci_dev *hdev)
{
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (!hdev_is_powered(hdev))
		return 0;

	if (mgmt_powering_down(hdev))
		return 0;

	if (hdev->scanning_paused)
		return 0;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return 0;

	return hci_write_scan_enable_sync(hdev, scan);
}

int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.name, name, sizeof(cp.name));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
				     sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

/* This function performs the powered update HCI command sequence after the
 * HCI init sequence, which ends up resetting all states; the sequence is as
 * follows:
 *
 * HCI_SSP_ENABLED(Enable SSP)
 * HCI_LE_ENABLED(Enable LE)
 * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
 * Update adv data)
 * Enable Authentication
 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
 * Set Name -> Set EIR)
 * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
 */
int hci_powered_update_sync(struct hci_dev *hdev)
{
	int err;

	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	err = hci_write_ssp_mode_sync(hdev, 0x01);
	if (err)
		return err;

	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
	if (err)
		return err;

	err = hci_powered_update_adv_sync(hdev);
	if (err)
		return err;

	err = hci_write_auth_enable_sync(hdev);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			hci_write_fast_connectable_sync(hdev, true);
		else
			hci_write_fast_connectable_sync(hdev, false);
		hci_update_scan_sync(hdev);
		hci_update_class_sync(hdev);
		hci_update_name_sync(hdev, hdev->dev_name);
		hci_update_eir_sync(hdev);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
	}

	return 0;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	if (hci_test_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN))
		baswap(&hdev->public_addr, &ba);
	else
		bacpy(&hdev->public_addr, &ba);
}

struct hci_init_stage {
	int (*func)(struct hci_dev *hdev);
};

/* Run init stage NULL terminated function table */
static int hci_init_stage_sync(struct hci_dev *hdev,
			       const struct hci_init_stage *stage)
{
	size_t i;

	for (i = 0; stage[i].func; i++) {
		int err;

		err = stage[i].func(hdev);
		if (err)
			return err;
	}

	return 0;
}

/* Read Local Version */
static int hci_read_local_version_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read BD Address */
static int hci_read_bd_addr_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
				     0, NULL, HCI_CMD_TIMEOUT);
}

#define HCI_INIT(_func) \
{ \
	.func = _func, \
}

static const struct hci_init_stage hci_init0[] = {
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

int hci_reset_sync(struct hci_dev *hdev)
{
	int err;

	set_bit(HCI_RESET, &hdev->flags);

	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return 0;
}

static int hci_init0_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	return hci_init_stage_sync(hdev, hci_init0);
}

static int hci_unconf_init_sync(struct hci_dev *hdev)
{
	int err;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return 0;

	err = hci_init0_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

/* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* BR Controller init stage 1 command sequence */
static const struct hci_init_stage br_init1[] = {
	/* HCI_OP_READ_LOCAL_FEATURES */
	HCI_INIT(hci_read_local_features_sync),
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

/* Read Local Commands */
static int hci_read_local_cmds_sync(struct hci_dev *hdev)
{
	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS))
		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
					     0, NULL, HCI_CMD_TIMEOUT);

	return 0;
}

static int hci_init1_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	return hci_init_stage_sync(hdev, br_init1);
}

/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Class of Device */
static int hci_read_dev_class_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Local Name */
static int hci_read_local_name_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Voice Setting */
static int hci_read_voice_setting_sync(struct hci_dev *hdev)
{
	if (!read_voice_setting_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Number of Supported IAC */
static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Current IAC LAP */
static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
				     u8 cond_type, bdaddr_t *bdaddr,
				     u8 auto_accept)
{
	struct hci_cp_set_event_filter cp;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.flt_type = flt_type;

	if (flt_type != HCI_FLT_CLEAR_ALL) {
		cp.cond_type = cond_type;
		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
		cp.addr_conn_flt.auto_accept = auto_accept;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
				     flt_type == HCI_FLT_CLEAR_ALL ?
				     sizeof(cp.flt_type) : sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_clear_event_filter_sync(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
		return 0;

	/* In theory the state machine should not reach here unless
	 * a hci_set_event_filter_sync() call succeeds, but we do
	 * the check both for parity and as a future reminder.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
		return 0;

	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
					 BDADDR_ANY, 0x00);
}

/* Connection accept timeout ~20 secs */
static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
{
	__le16 param = cpu_to_le16(0x7d00);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
				     sizeof(param), &param, HCI_CMD_TIMEOUT);
}
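
/* The "~20 secs" above follows from the 0.625 ms baseband slot unit:
 * 0x7d00 = 32000 slots * 0.625 ms = 20000 ms.
 */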
3849
3850
/* Enable SCO flow control if supported */
3851
static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
3852
{
3853
struct hci_cp_write_sync_flowctl cp;
3854
int err;
3855
3856
/* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
3857
if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) ||
3858
!hci_test_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED))
3859
return 0;
3860
3861
memset(&cp, 0, sizeof(cp));
3862
cp.enable = 0x01;
3863
3864
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL,
3865
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3866
if (!err)
3867
hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL);
3868
3869
return err;
3870
}
3871
3872
/* BR Controller init stage 2 command sequence */
3873
static const struct hci_init_stage br_init2[] = {
3874
/* HCI_OP_READ_BUFFER_SIZE */
3875
HCI_INIT(hci_read_buffer_size_sync),
3876
/* HCI_OP_READ_CLASS_OF_DEV */
3877
HCI_INIT(hci_read_dev_class_sync),
3878
/* HCI_OP_READ_LOCAL_NAME */
3879
HCI_INIT(hci_read_local_name_sync),
3880
/* HCI_OP_READ_VOICE_SETTING */
3881
HCI_INIT(hci_read_voice_setting_sync),
3882
/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3883
HCI_INIT(hci_read_num_supported_iac_sync),
3884
/* HCI_OP_READ_CURRENT_IAC_LAP */
3885
HCI_INIT(hci_read_current_iac_lap_sync),
3886
/* HCI_OP_SET_EVENT_FLT */
3887
HCI_INIT(hci_clear_event_filter_sync),
3888
/* HCI_OP_WRITE_CA_TIMEOUT */
3889
HCI_INIT(hci_write_ca_timeout_sync),
3890
/* HCI_OP_WRITE_SYNC_FLOWCTL */
3891
HCI_INIT(hci_write_sync_flowctl_sync),
3892
{}
3893
};
3894
3895
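/* Enable Secure Simple Pairing if supported and enabled */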
static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
{
	u8 mode = 0x01;

	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	/* When SSP is available, the host features page should be
	 * available as well. However some controllers list max_page
	 * as 0 as long as SSP has not been enabled. To get proper
	 * debugging output, force max_page to at least 1.
	 */
	hdev->max_page = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

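/* Clear the stored EIR data when SSP is supported but not enabled */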
static int hci_write_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(&cp, 0, sizeof(cp));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

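/* Select the best supported inquiry mode: extended (0x02) if available,
 * otherwise with RSSI (0x01).
 */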
static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
{
	u8 mode;

	if (!lmp_inq_rssi_capable(hdev) &&
	    !hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
		return 0;

	/* If Extended Inquiry Result events are supported, then
	 * they are clearly preferred over Inquiry Result with RSSI
	 * events.
	 */
	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

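/* Read Inquiry Response TX Power level if supported */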
static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
{
	if (!lmp_inq_tx_pwr_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

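/* Read the given page of the local extended features */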
static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
{
	struct hci_cp_read_local_ext_features cp;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.page = page;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
{
	return hci_read_local_ext_features_sync(hdev, 0x01);
}

/* HCI Controller init stage 2 command sequence */
static const struct hci_init_stage hci_init2[] = {
	/* HCI_OP_READ_LOCAL_COMMANDS */
	HCI_INIT(hci_read_local_cmds_sync),
	/* HCI_OP_WRITE_SSP_MODE */
	HCI_INIT(hci_write_ssp_mode_1_sync),
	/* HCI_OP_WRITE_EIR */
	HCI_INIT(hci_write_eir_sync),
	/* HCI_OP_WRITE_INQUIRY_MODE */
	HCI_INIT(hci_write_inquiry_mode_sync),
	/* HCI_OP_READ_INQ_RSP_TX_POWER */
	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_1_sync),
	/* HCI_OP_WRITE_AUTH_ENABLE */
	HCI_INIT(hci_write_auth_enable_sync),
	{}
};

/* Read LE Buffer Size */
static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
{
	/* Use Read LE Buffer Size V2 if supported */
	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
					     0, NULL, HCI_CMD_TIMEOUT);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Local Supported Features */
static int hci_le_read_local_features_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Supported States */
static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 2 command sequence */
static const struct hci_init_stage le_init2[] = {
	/* HCI_OP_LE_READ_LOCAL_FEATURES */
	HCI_INIT(hci_le_read_local_features_sync),
	/* HCI_OP_LE_READ_BUFFER_SIZE */
	HCI_INIT(hci_le_read_buffer_size_sync),
	/* HCI_OP_LE_READ_SUPPORTED_STATES */
	HCI_INIT(hci_le_read_supported_states_sync),
	{}
};

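/* Run init stage 2: common commands first, then the BR/EDR and LE
 * specific sequences depending on controller capabilities.
 */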
static int hci_init2_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init2);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		err = hci_init_stage_sync(hdev, br_init2);
		if (err)
			return err;
	} else {
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
	}

	if (lmp_le_capable(hdev)) {
		err = hci_init_stage_sync(hdev, le_init2);
		if (err)
			return err;

		/* LE-only controllers have LE implicitly enabled */
		if (!lmp_bredr_capable(hdev))
			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	return 0;
}

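/* Build and program the event mask based on controller capabilities,
 * with quirks for older Broadcom and CSR dongles.
 */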
static int hci_set_event_mask_sync(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */

		/* Don't set Disconnect Complete and mode change when
		 * suspended as that would wakeup the host when disconnecting
		 * due to suspend.
		 */
		if (hdev->suspended) {
			events[0] &= 0xef;
			events[2] &= 0xf7;
		}
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			/* Don't set Disconnect Complete when suspended as that
			 * would wakeup the host when disconnecting due to
			 * suspend.
			 */
			if (!hdev->suspended)
				events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

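/* Read all stored link keys if the command is supported and not broken */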
static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_read_stored_link_key cp;

	if (!(hdev->commands[6] & 0x20) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.read_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

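/* Write the default link policy based on the supported LMP features */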
static int hci_setup_link_policy_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (!(hdev->commands[5] & 0x10))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

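/* Read Page Scan Activity if the command is supported */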
static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[8] & 0x01))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
				     0, NULL, HCI_CMD_TIMEOUT);
}

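/* Read Default Erroneous Data Reporting if supported and not broken */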
static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[18] & 0x04) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
{
	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (!(hdev->commands[13] & 0x01) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read features beyond page 1 if available */
static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
{
	u8 page;
	int err;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
	     page++) {
		err = hci_read_local_ext_features_sync(hdev, page);
		if (err)
			return err;
	}

	return 0;
}

/* HCI Controller init stage 3 command sequence */
static const struct hci_init_stage hci_init3[] = {
	/* HCI_OP_SET_EVENT_MASK */
	HCI_INIT(hci_set_event_mask_sync),
	/* HCI_OP_READ_STORED_LINK_KEY */
	HCI_INIT(hci_read_stored_link_key_sync),
	/* HCI_OP_WRITE_DEF_LINK_POLICY */
	HCI_INIT(hci_setup_link_policy_sync),
	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
	HCI_INIT(hci_read_page_scan_activity_sync),
	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_read_def_err_data_reporting_sync),
	/* HCI_OP_READ_PAGE_SCAN_TYPE */
	HCI_INIT(hci_read_page_scan_type_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_all_sync),
	{}
};

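/* Build and program the LE event mask based on the supported LE
 * features and commands.
 */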
static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
{
	u8 events[8];

	if (!lmp_le_capable(hdev))
		return 0;

	memset(events, 0, sizeof(events));

	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
		events[0] |= 0x10; /* LE Long Term Key Request */

	/* If controller supports the Connection Parameters Request
	 * Link Layer Procedure, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
		/* LE Remote Connection Parameter Request */
		events[0] |= 0x20;

	/* If the controller supports the Data Length Extension
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
		events[0] |= 0x40; /* LE Data Length Change */

	/* If the controller supports LL Privacy feature or LE Extended Adv,
	 * enable the corresponding event.
	 */
	if (use_enhanced_conn_complete(hdev))
		events[1] |= 0x02; /* LE Enhanced Connection Complete */

	/* Mark Device Privacy if Privacy Mode is supported */
	if (privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;

	/* Mark Address Resolution if LL Privacy is supported */
	if (ll_privacy_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION;

	/* If the controller supports Extended Scanner Filter
	 * Policies, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
		events[1] |= 0x04; /* LE Direct Advertising Report */

	/* If the controller supports Channel Selection Algorithm #2
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
		events[2] |= 0x08; /* LE Channel Selection Algorithm */

	/* If the controller supports the LE Set Scan Enable command,
	 * enable the corresponding advertising report event.
	 */
	if (hdev->commands[26] & 0x08)
		events[0] |= 0x02; /* LE Advertising Report */

	/* If the controller supports the LE Create Connection
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[26] & 0x10)
		events[0] |= 0x01; /* LE Connection Complete */

	/* If the controller supports the LE Connection Update
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x04)
		events[0] |= 0x04; /* LE Connection Update Complete */

	/* If the controller supports the LE Read Remote Used Features
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x20)
		/* LE Read Remote Used Features Complete */
		events[0] |= 0x08;

	/* If the controller supports the LE Read Local P-256
	 * Public Key command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x02)
		/* LE Read Local P-256 Public Key Complete */
		events[0] |= 0x80;

	/* If the controller supports the LE Generate DHKey
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x04)
		events[1] |= 0x01; /* LE Generate DHKey Complete */

	/* If the controller supports the LE Set Default PHY or
	 * LE Set PHY commands, enable the corresponding event.
	 */
	if (hdev->commands[35] & (0x20 | 0x40))
		events[1] |= 0x08; /* LE PHY Update Complete */

	/* If the controller supports LE Set Extended Scan Parameters
	 * and LE Set Extended Scan Enable commands, enable the
	 * corresponding event.
	 */
	if (use_ext_scan(hdev))
		events[1] |= 0x10; /* LE Extended Advertising Report */

	/* If the controller supports the LE Extended Advertising
	 * command, enable the corresponding event.
	 */
	if (ext_adv_capable(hdev))
		events[2] |= 0x02; /* LE Advertising Set Terminated */

	if (cis_capable(hdev)) {
		events[3] |= 0x01; /* LE CIS Established */
		if (cis_peripheral_capable(hdev))
			events[3] |= 0x02; /* LE CIS Request */
	}

	if (bis_capable(hdev)) {
		events[1] |= 0x20; /* LE PA Report */
		events[1] |= 0x40; /* LE PA Sync Established */
		events[3] |= 0x04; /* LE Create BIG Complete */
		events[3] |= 0x08; /* LE Terminate BIG Complete */
		events[3] |= 0x10; /* LE BIG Sync Established */
		events[3] |= 0x20; /* LE BIG Sync Loss */
		events[4] |= 0x02; /* LE BIG Info Advertising Report */
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read LE Advertising Channel TX Power */
static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
{
	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
		/* The HCI spec forbids mixing legacy and extended
		 * advertising commands, and READ_ADV_TX_POWER is a
		 * legacy command. So do not call it if extended
		 * advertising is supported, otherwise the controller
		 * will return COMMAND_DISALLOWED.
		 */
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_ADV_TX_POWER,
					     0, NULL, HCI_CMD_TIMEOUT);
	}

	return 0;
}

/* Read LE Min/Max TX Power */
static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[38] & 0x80) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Accept List Size */
static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Clear LE Resolving List */
static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x20))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Set RPA timeout */
static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
{
	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);

	if (!(hdev->commands[35] & 0x04) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
				     sizeof(timeout), &timeout,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Maximum Data Length */
static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Suggested Default Data Length */
static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Number of Supported Advertising Sets */
static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
{
	if (!ext_adv_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev,
				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Write LE Host Supported */
static int hci_set_le_support_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le == lmp_host_le_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Set Host Feature */
static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_host_feature cp;

	if (!iso_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	/* Connected Isochronous Channels (Host Support) */
	cp.bit_number = 32;
	cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 3 command sequence */
static const struct hci_init_stage le_init3[] = {
	/* HCI_OP_LE_SET_EVENT_MASK */
	HCI_INIT(hci_le_set_event_mask_sync),
	/* HCI_OP_LE_READ_ADV_TX_POWER */
	HCI_INIT(hci_le_read_adv_tx_power_sync),
	/* HCI_OP_LE_READ_TRANSMIT_POWER */
	HCI_INIT(hci_le_read_tx_power_sync),
	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
	HCI_INIT(hci_le_read_accept_list_size_sync),
	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
	HCI_INIT(hci_le_clear_accept_list_sync),
	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
	HCI_INIT(hci_le_read_resolv_list_size_sync),
	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
	HCI_INIT(hci_le_clear_resolv_list_sync),
	/* HCI_OP_LE_SET_RPA_TIMEOUT */
	HCI_INIT(hci_le_set_rpa_timeout_sync),
	/* HCI_OP_LE_READ_MAX_DATA_LEN */
	HCI_INIT(hci_le_read_max_data_len_sync),
	/* HCI_OP_LE_READ_DEF_DATA_LEN */
	HCI_INIT(hci_le_read_def_data_len_sync),
	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
	HCI_INIT(hci_set_le_support_sync),
	/* HCI_OP_LE_SET_HOST_FEATURE */
	HCI_INIT(hci_le_set_host_feature_sync),
	{}
};

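/* Run init stage 3: common commands first, then the LE specific
 * sequence if the controller is LE capable.
 */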
static int hci_init3_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init3);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init3);

	return 0;
}

static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate support for deleting stored link keys,
	 * but they don't. The quirk lets a driver just disable this command.
	 */
	if (!(hdev->commands[6] & 0x80) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

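/* Program event mask page 2 if the command is supported and any of
 * the covered events is actually needed.
 */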
static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
{
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (!(hdev->commands[22] & 0x04))
		return 0;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x08; /* Truncated Page Complete */
		events[2] |= 0x20; /* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CPB Receive */
		events[2] |= 0x04; /* CPB Timeout */
		events[2] |= 0x10; /* Peripheral Page Response Timeout */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (!changed)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read local codec list if the HCI command is supported */
static int hci_read_local_codecs_sync(struct hci_dev *hdev)
{
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	return 0;
}

/* Read local pairing options if the HCI command is supported */
static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[41] & 0x08))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Get MWS transport configuration if the HCI command is supported */
static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
{
	if (!mws_transport_config_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Check for Synchronization Train support */
static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
{
	if (!lmp_sync_train_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Enable Secure Connections if supported and configured */
static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
{
	u8 support = 0x01;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    !bredr_sc_enabled(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				     sizeof(support), &support,
				     HCI_CMD_TIMEOUT);
}

/* Set erroneous data reporting, if supported, to match the wideband
 * speech setting.
 */
static int hci_set_err_data_report_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_err_data_reporting cp;
	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	if (!(hdev->commands[18] & 0x08) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
		return 0;

	if (enabled == hdev->err_data_reporting)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
					  ERR_DATA_REPORTING_DISABLED;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

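/* HCI Controller init stage 4 command sequence */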
static const struct hci_init_stage hci_init4[] = {
	/* HCI_OP_DELETE_STORED_LINK_KEY */
	HCI_INIT(hci_delete_stored_link_key_sync),
	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
	HCI_INIT(hci_set_event_mask_page_2_sync),
	/* HCI_OP_READ_LOCAL_CODECS */
	HCI_INIT(hci_read_local_codecs_sync),
	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
	HCI_INIT(hci_read_local_pairing_opts_sync),
	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
	HCI_INIT(hci_get_mws_transport_config_sync),
	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
	HCI_INIT(hci_read_sync_train_params_sync),
	/* HCI_OP_WRITE_SC_SUPPORT */
	HCI_INIT(hci_write_sc_support_1_sync),
	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_set_err_data_report_sync),
	{}
};

/* Set Suggested Default Data Length to maximum if supported */
static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_write_def_data_len cp;

	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set default PHY parameters if the command is supported; enable all
 * supported PHYs according to the LE feature bits.
 */
static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_default_phy cp;

	if (!(hdev->commands[35] & 0x20)) {
		/* If the command is not supported it means only 1M PHY is
		 * supported.
		 */
		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.all_phys = 0x00;
	cp.tx_phys = HCI_LE_SET_PHY_1M;
	cp.rx_phys = HCI_LE_SET_PHY_1M;

	/* Enable 2M PHY if supported */
	if (le_2m_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_2M;
		cp.rx_phys |= HCI_LE_SET_PHY_2M;
	}

	/* Enable Coded PHY if supported */
	if (le_coded_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

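/* LE Controller init stage 4 command sequence */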
static const struct hci_init_stage le_init4[] = {
	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
	HCI_INIT(hci_le_set_write_def_data_len_sync),
	/* HCI_OP_LE_SET_DEFAULT_PHY */
	HCI_INIT(hci_le_set_default_phy_sync),
	{}
};

static int hci_init4_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init4);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init4);

	return 0;
}

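/* Run the full HCI init sequence (stages 1-4) and create the debugfs
 * entries when in setup or config phase.
 */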
static int hci_init_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_init1_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = hci_init2_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init3_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init4_sync(hdev);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }

static const struct {
	unsigned long quirk;
	const char *desc;
} hci_broken_table[] = {
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
			 "HCI LE Extended Create Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
			 "to unexpected SMP errors when pairing "
			 "and will not be used."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};

/* This function handles the hdev setup stage:
 *
 * Calls hdev->setup
 * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
 */
static int hci_dev_setup_sync(struct hci_dev *hdev)
{
	int ret = 0;
	bool invalid_bdaddr;
	size_t i;

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP))
		return 0;

	bt_dev_dbg(hdev, "");

	hci_sock_dev_event(hdev, HCI_DEV_SETUP);

	if (hdev->setup)
		ret = hdev->setup(hdev);

	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
		if (hci_test_quirk(hdev, hci_broken_table[i].quirk))
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
	}

	/* The transport driver can set the quirk to mark the
	 * BD_ADDR invalid before creating the HCI device or in
	 * its setup callback.
	 */
	invalid_bdaddr = hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
			 hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
	if (!ret) {
		if (hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY) &&
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
			hci_dev_get_bd_addr_from_property(hdev);

		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr) {
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			if (!ret)
				invalid_bdaddr = false;
		}
	}

	/* The transport driver can set these quirks before
	 * creating the HCI device or in its setup callback.
	 *
	 * For the invalid BD_ADDR quirk it is possible that
	 * it becomes a valid address if the bootloader does
	 * provide it (see above).
	 *
	 * In case any of them is set, the controller has to
	 * start up as unconfigured.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) ||
	    invalid_bdaddr)
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* For an unconfigured controller it is required to
	 * read at least the version information provided by
	 * the Read Local Version Information command.
	 *
	 * If the set_bdaddr driver callback is provided, then
	 * also the original Bluetooth public device address
	 * will be read using the Read BD Address command.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return hci_unconf_init_sync(hdev);

	return ret;
}

/* This function handles the hdev init stage:
 *
 * Calls hci_dev_setup_sync to perform the setup stage
 * Calls hci_init_sync to perform the HCI command init sequence
 */
static int hci_dev_init_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	ret = hci_dev_setup_sync(hdev);

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = hci_init_sync(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		msft_do_open(hdev);
		aosp_do_open(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	return ret;
}

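/* Bring the device up: open the transport, run the init sequence and,
 * on failure, tear everything down again.
 */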
int hci_dev_open_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_devcd_reset(hdev);

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	ret = hci_dev_init_sync(hdev);
	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT)) {
			ret = hci_powered_update_sync(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() can schedule new cmd_work, it
		 * should be flushed first to avoid an unexpected call
		 * of hci_cmd_work().
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		if (hdev->req_skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	return ret;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		hci_pend_le_list_del_init(p);
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_shutdown(struct hci_dev *hdev)
{
	int err = 0;

	/* Similar to how we first do setup and then set the exclusive access
	 * bit for userspace, we must first unset userchannel and then clean up.
	 * Otherwise, the kernel can't properly use the hci channel to clean up
	 * the controller (some shutdown routines require sending additional
	 * commands to the controller for example).
	 */
	bool was_userchannel =
		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (was_userchannel)
		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);

	return err;
}

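/* Bring the device down: stop pending work, run the vendor shutdown,
 * flush queues and connections, and close the transport.
 */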
int hci_dev_close_sync(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		disable_delayed_work(&hdev->power_off);
		disable_delayed_work(&hdev->ncmd_timer);
		disable_delayed_work(&hdev->le_scan_disable);
	} else {
		cancel_delayed_work(&hdev->power_off);
		cancel_delayed_work(&hdev->ncmd_timer);
		cancel_delayed_work(&hdev->le_scan_disable);
	}

	hci_cmd_sync_cancel_sync(hdev, ENODEV);

	cancel_interleave_scan(hdev);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	err = hci_dev_shutdown(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
	smp_unregister(hdev);
	hci_dev_unlock(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		aosp_do_close(hdev);
		msft_do_close(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		hci_reset_sync(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last request */
	if (hdev->req_skb) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_dev_put(hdev);
	return err;
}

/* This function performs the power-on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP), perform the
 * hci_powered_update_sync sequence, otherwise run hci_dev_open_sync,
 * which follows up with hci_powered_update_sync once the init sequence
 * has completed.
 */
static int hci_power_on_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		return hci_powered_update_sync(hdev);
	}

	err = hci_dev_open_sync(hdev);
	if (err < 0)
		return err;

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to return the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_close_sync(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

	return 0;
}

static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_cp_remote_name_req_cancel cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

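/* Stop any ongoing discovery: cancel inquiry, disable LE scan and
 * cancel a pending remote name resolution.
 */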
int hci_stop_discovery_sync(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
						    0, NULL, HCI_CMD_TIMEOUT);
			if (err)
				return err;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);

			err = hci_scan_disable_sync(hdev);
			if (err)
				return err;
		}

	} else {
		err = hci_scan_disable_sync(hdev);
		if (err)
			return err;
	}

	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return 0;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return 0;

		/* Ignore cancel errors, since they should not interfere
		 * with stopping the discovery.
		 */
		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
	}

	return 0;
}

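/* Disconnect an established connection. BIS and PA links are handled
 * by hci_conn_failed since hci_conn_del does the necessary cleanup.
 */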
static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_disconnect cp;

	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
		/* This is a BIS connection, hci_conn_del will
		 * do the necessary cleanup.
		 */
		hci_dev_lock(hdev);
		hci_conn_failed(conn, reason);
		hci_dev_unlock(hdev);

		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
						sizeof(cp), &cp,
						HCI_EV_DISCONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
				      struct hci_conn *conn, u8 reason)
{
	/* Return reason if scanning since the connection will probably
	 * be cleaned up directly.
	 */
	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
		return reason;

	if (conn->role == HCI_ROLE_SLAVE ||
	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
				   u8 reason)
{
	if (conn->type == LE_LINK)
		return hci_le_connect_cancel_sync(hdev, conn, reason);

	if (conn->type == CIS_LINK) {
		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 1857:
		 *
		 * If this command is issued for a CIS on the Central and the
		 * CIS is successfully terminated before being established,
		 * then an HCI_LE_CIS_Established event shall also be sent for
		 * this CIS with the Status Operation Cancelled by Host (0x44).
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			return hci_disconnect_sync(hdev, conn, reason);

		/* CIS with no Create CIS sent have nothing to cancel */
		return HCI_ERROR_LOCAL_HOST_TERM;
	}

	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
		/* There is no way to cancel a BIS without terminating the BIG
		 * which is done later on connection cleanup.
		 */
		return 0;
	}

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
						6, &conn->dst,
						HCI_EV_CONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
				     6, &conn->dst, HCI_CMD_TIMEOUT);
}

static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_reject_sync_conn_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	/* SCO rejection has its own limited set of
	 * allowed error values (0x0D-0x0F).
	 */
	if (reason < 0x0d || reason > 0x0f)
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
				  u8 reason)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
				u8 reason)
{
	struct hci_cp_reject_conn_req cp;

	if (conn->type == CIS_LINK)
		return hci_le_reject_cis_sync(hdev, conn, reason);

	if (conn->type == BIS_LINK || conn->type == PA_LINK)
		return -EINVAL;

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
		return hci_reject_sco_sync(hdev, conn, reason);

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

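/* Abort a connection according to its current state: disconnect,
 * cancel or reject it, falling back to deleting the hci_conn object
 * when the controller and host stack are out of sync.
 */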
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5654
{
5655
int err = 0;
5656
u16 handle = conn->handle;
5657
bool disconnect = false;
5658
struct hci_conn *c;
5659
5660
switch (conn->state) {
5661
case BT_CONNECTED:
5662
case BT_CONFIG:
5663
err = hci_disconnect_sync(hdev, conn, reason);
5664
break;
5665
case BT_CONNECT:
5666
err = hci_connect_cancel_sync(hdev, conn, reason);
5667
break;
5668
case BT_CONNECT2:
5669
err = hci_reject_conn_sync(hdev, conn, reason);
5670
break;
5671
case BT_OPEN:
5672
case BT_BOUND:
5673
break;
5674
default:
5675
disconnect = true;
5676
break;
5677
}
5678
5679
hci_dev_lock(hdev);
5680
5681
/* Check if the connection has been cleaned up concurrently */
5682
c = hci_conn_hash_lookup_handle(hdev, handle);
5683
if (!c || c != conn) {
5684
err = 0;
5685
goto unlock;
5686
}
5687
5688
/* Cleanup hci_conn object if it cannot be cancelled as it
5689
* likely means the controller and host stack are out of sync
5690
* or in case of LE it was still scanning so it can be cleanup
5691
* safely.
5692
*/
5693
if (disconnect) {
5694
conn->state = BT_CLOSED;
5695
hci_disconn_cfm(conn, reason);
5696
hci_conn_del(conn);
5697
} else {
5698
hci_conn_failed(conn, reason);
5699
}
5700
5701
unlock:
5702
hci_dev_unlock(hdev);
5703
return err;
5704
}
5705
5706
static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		/* Make sure the connection is not freed while unlocking */
		conn = hci_conn_get(conn);
		rcu_read_unlock();
		/* Disregard possible errors since hci_conn_del shall have been
		 * called even if errors occurred: a failure causes
		 * hci_conn_failed to be called, which calls hci_conn_del
		 * internally.
		 */
		hci_abort_conn_sync(hdev, conn, reason);
		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return 0;
}

/* This function performs the power off HCI command sequence as follows:
 *
 * Clear Advertising
 * Stop Discovery
 * Disconnect all connections
 * hci_dev_close_sync
 */
static int hci_power_off_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is already down there is nothing to do */
	if (!test_bit(HCI_UP, &hdev->flags))
		return 0;

	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		err = hci_write_scan_enable_sync(hdev, 0x00);
		if (err)
			goto out;
	}

	err = hci_clear_adv_sync(hdev, NULL, false);
	if (err)
		goto out;

	err = hci_stop_discovery_sync(hdev);
	if (err)
		goto out;

	/* Terminated due to Power Off */
	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
	if (err)
		goto out;

	err = hci_dev_close_sync(hdev);

out:
	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
	return err;
}

int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
{
	if (val)
		return hci_power_on_sync(hdev);

	return hci_power_off_sync(hdev);
}

static int hci_write_iac_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
				     (cp.num_iac * 3) + 1, &cp,
				     HCI_CMD_TIMEOUT);
}

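/* Note: each IAC LAP above is encoded little-endian, three bytes per entry:
 * the Limited Inquiry Access Code 0x9E8B00 is written as 00 8b 9e and the
 * General Inquiry Access Code 0x9E8B33 as 33 8b 9e, which is why the
 * command payload is num_iac * 3 bytes plus one byte for the count itself.
 */
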
int hci_update_discoverable_sync(struct hci_dev *hdev)
{
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = hci_write_iac_sync(hdev);
		if (err)
			return err;

		err = hci_update_scan_sync(hdev);
		if (err)
			return err;

		err = hci_update_class_sync(hdev);
		if (err)
			return err;
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		err = hci_update_adv_data_sync(hdev, 0x00);
		if (err)
			return err;

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				err = hci_start_ext_adv_sync(hdev, 0x00);
			else
				err = hci_enable_advertising_sync(hdev);
		}
	}

	return err;
}

static int update_discoverable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_discoverable_sync(hdev);
}

int hci_update_discoverable(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
					  NULL);

	return 0;
}

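/* Note: the deferred update above is only queued for the powered,
 * advertising, discoverable, limited-privacy combination because that is
 * the one case where the discoverable state changes the advertising
 * address (see hci_update_discoverable_sync); other discoverable changes
 * are presumably applied directly by their respective command paths.
 */
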
int hci_update_connectable_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_update_scan_sync(hdev);
	if (err)
		return err;

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			err = hci_start_ext_adv_sync(hdev,
						     hdev->cur_adv_instance);
		else
			err = hci_enable_advertising_sync(hdev);

		if (err)
			return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(hdev, "");

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));

	if (hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;
	cp.num_rsp = num_rsp;

	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

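/* Note: in the HCI_Inquiry command the length parameter is expressed in
 * units of 1.28 seconds and num_rsp limits the number of responses, with
 * 0x00 meaning unlimited; the DISCOV_BREDR_INQUIRY_LEN callers below rely
 * on the former.
 */
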
static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
{
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the passive scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	cancel_interleave_scan(hdev);

	/* Pause address resolution for active scan and stop advertising if
	 * privacy is enabled.
	 */
	err = hci_pause_addr_resolution(hdev);
	if (err)
		goto failed;

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
					     &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev) ||
	    (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER) &&
	     hdev->discovery.result_filtering)) {
		/* Duplicate filter should be disabled when some advertisement
		 * monitor is activated, otherwise AdvMon can only receive one
		 * advertisement for one peer(*) during active scanning, and
		 * might report loss to these peers.
		 *
		 * If the controller does strict duplicate filtering and the
		 * discovery requires result filtering, disable controller
		 * based filtering, since it can cause reports that would
		 * match the host filter to not be reported.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
				  hdev->le_scan_window_discovery,
				  own_addr_type, filter_policy, filter_dup);
	if (!err)
		return err;

failed:
	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* Resume passive scanning */
	hci_update_passive_scan_sync(hdev);
	return err;
}

static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
	if (err)
		return err;

	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}

int hci_start_discovery_sync(struct hci_dev *hdev)
{
	unsigned long timeout;
	int err;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			err = hci_start_interleaved_discovery_sync(hdev);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
	return 0;
}

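/* Note: the le_scan_disable delayed work queued above stops the LE scan
 * once the discovery timeout expires; for non-simultaneous interleaved
 * discovery it fires after the shorter discov_interleaved_timeout, at
 * which point its handler (elsewhere in this file) is expected to move on
 * to the BR/EDR inquiry phase.
 */
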
static void hci_suspend_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_suspend_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function disables discovery and marks it as paused */
static int hci_pause_discovery_sync(struct hci_dev *hdev)
{
	int old_state = hdev->discovery.state;
	int err;

	/* Nothing to do if discovery is already stopped/stopping/paused */
	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
	    hdev->discovery_paused)
		return 0;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	hdev->discovery_paused = true;
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	return 0;
}

static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
		return 0;

	/* Always clear event filter when starting */
	hci_clear_event_filter_sync(hdev);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
			continue;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);

		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
						HCI_CONN_SETUP_ALLOW_BDADDR,
						&b->bdaddr,
						HCI_CONN_SETUP_AUTO_ON);
		if (err)
			bt_dev_err(hdev, "Failed to set event filter for %pMR",
				   &b->bdaddr);
		else
			scan = SCAN_PAGE;
	}

	if (scan && !scanning)
		hci_write_scan_enable_sync(hdev, scan);
	else if (!scan && scanning)
		hci_write_scan_enable_sync(hdev, scan);

	return 0;
}

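/* Note: both branches of the final condition write the same value; the
 * split merely restricts the write to actual state transitions, enabling
 * page scan when at least one wakeup filter was installed and disabling
 * it when none was.
 */
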
/* This function disables scan (BR and LE) and marks it as paused */
static int hci_pause_scan_sync(struct hci_dev *hdev)
{
	if (hdev->scanning_paused)
		return 0;

	/* Disable page scan if enabled */
	if (test_bit(HCI_PSCAN, &hdev->flags))
		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);

	hci_scan_disable_sync(hdev);

	hdev->scanning_paused = true;

	return 0;
}

/* This function performs the HCI suspend procedures in the following order:
 *
 * Pause discovery (active scanning/inquiry)
 * Pause Directed Advertising/Advertising
 * Pause Scanning (passive scanning in case discovery was not active)
 * Disconnect all connections
 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
 * otherwise:
 * Update event mask (only set events that are allowed to wake up the host)
 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
 * Update passive scanning (lower duty cycle)
 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
 */
int hci_suspend_sync(struct hci_dev *hdev)
{
	int err;

	/* If already marked as suspended there is nothing to do */
	if (hdev->suspended)
		return 0;

	/* Mark device as suspended */
	hdev->suspended = true;

	/* Pause discovery if not already stopped */
	hci_pause_discovery_sync(hdev);

	/* Pause other advertisements */
	hci_pause_advertising_sync(hdev);

	/* Suspend monitor filters */
	hci_suspend_monitor_sync(hdev);

	/* Prevent disconnects from causing scanning to be re-enabled */
	hci_pause_scan_sync(hdev);

	if (hci_conn_count(hdev)) {
		/* Soft disconnect everything (power off) */
		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
		if (err) {
			/* Set state to BT_RUNNING so resume doesn't notify */
			hdev->suspend_state = BT_RUNNING;
			hci_resume_sync(hdev);
			return err;
		}

		/* Update event mask so only the allowed event can wakeup the
		 * host.
		 */
		hci_set_event_mask_sync(hdev);
	}

	/* Only configure accept list if disconnect succeeded and wake
	 * isn't being prevented.
	 */
	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
		return 0;
	}

	/* Unpause to take care of updating scanning params */
	hdev->scanning_paused = false;

	/* Enable event filter for paired devices */
	hci_update_event_filter_sync(hdev);

	/* Update LE passive scan if enabled */
	hci_update_passive_scan_sync(hdev);

	/* Pause scan changes again. */
	hdev->scanning_paused = true;

	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;

	return 0;
}

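/* Summary of the resulting state: suspend_state ends up as
 * BT_SUSPEND_DISCONNECT when the device cannot (or is not allowed to)
 * wake the host, and as BT_SUSPEND_CONFIGURE_WAKE when wakeup is
 * permitted and the event filter and passive scan have been configured
 * for it.
 */
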
/* This function resumes discovery */
static int hci_resume_discovery_sync(struct hci_dev *hdev)
{
	int err;

	/* If discovery was not paused there is nothing to do */
	if (!hdev->discovery_paused)
		return 0;

	hdev->discovery_paused = false;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	err = hci_start_discovery_sync(hdev);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
					    DISCOVERY_FINDING);

	return err;
}

static void hci_resume_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_resume_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function resumes scanning and resets the paused flag */
static int hci_resume_scan_sync(struct hci_dev *hdev)
{
	if (!hdev->scanning_paused)
		return 0;

	hdev->scanning_paused = false;

	hci_update_scan_sync(hdev);

	/* Reset passive scanning to normal */
	hci_update_passive_scan_sync(hdev);

	return 0;
}

/* This function performs the HCI resume procedures in the following order:
 *
 * Restore event mask
 * Clear event filter
 * Update passive scanning (normal duty cycle)
 * Resume Directed Advertising/Advertising
 * Resume discovery (active scanning/inquiry)
 */
int hci_resume_sync(struct hci_dev *hdev)
{
	/* If not marked as suspended there is nothing to do */
	if (!hdev->suspended)
		return 0;

	hdev->suspended = false;

	/* Restore event mask */
	hci_set_event_mask_sync(hdev);

	/* Clear any event filters and restore scan state */
	hci_clear_event_filter_sync(hdev);

	/* Resume scanning */
	hci_resume_scan_sync(hdev);

	/* Resume monitor filters */
	hci_resume_monitor_sync(hdev);

	/* Resume other advertisements */
	hci_resume_advertising_sync(hdev);

	/* Resume discovery */
	hci_resume_discovery_sync(hdev);

	return 0;
}

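/* Note: the resume sequence largely mirrors hci_suspend_sync in reverse,
 * undoing the event mask, event filter, scan, monitor, advertising and
 * discovery changes in roughly the opposite order from the one in which
 * they were applied.
 */
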
static bool conn_use_rpa(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
						struct hci_conn *conn)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_rp_le_set_ext_adv_params rp;
	int err;
	bdaddr_t random_addr;
	u8 own_addr_type;

	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		return err;

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
				     &own_addr_type, &random_addr);
	if (err)
		return err;

	memset(&cp, 0, sizeof(cp));

	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = HCI_TX_POWER_INVALID;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0x00; /* Use instance 0 for directed adv */
	cp.own_addr_type = own_addr_type;
	cp.peer_addr_type = conn->dst_type;
	bacpy(&cp.peer_addr, &conn->dst);

	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not
	 * support advertising data; when the advertising set already
	 * contains some, the controller shall return the error code
	 * 'Invalid HCI Command Parameters (0x12)'.
	 * So it is required to remove the adv set for handle 0x00, since
	 * we use instance 0 for directed adv.
	 */
	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
	if (err)
		return err;

	err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
	if (err)
		return err;

	/* Update adv data as tx power is known now */
	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
	if (err)
		return err;

	/* Check if the random address needs to be updated */
	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY) &&
	    bacmp(&random_addr, &hdev->random_addr)) {
		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
						       &random_addr);
		if (err)
			return err;
	}

	return hci_enable_ext_advertising_sync(hdev, 0x00);
}

static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	u8 status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject command if intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}

static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

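/* Note: per the LE Extended Create Connection command format, the packet
 * carries one hci_cp_le_ext_conn_param block for every bit set in phys,
 * ordered from the least significant bit (1M) upwards, which is why p is
 * advanced and plen grown once per selected PHY above.
 */
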
static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "conn %p", conn);

	clear_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->state = BT_CONNECT;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning and simultaneous roles is not
		 * enabled simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising. */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles is not in use. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;
	/* Send command LE Extended Create Connection if supported */
	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	if (err == -ETIMEDOUT)
		hci_le_connect_cancel_sync(hdev, conn, 0x00);

	/* Re-enable advertising after the connection attempt is finished. */
	hci_resume_advertising_sync(hdev);
	return err;
}

int hci_le_create_cis_sync(struct hci_dev *hdev)
{
	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
	size_t aux_num_cis = 0;
	struct hci_conn *conn;
	u8 cig = BT_ISO_QOS_CIG_UNSET;

	/* The spec allows only one pending LE Create CIS command at a time. If
	 * the command is pending now, don't do anything. We check for pending
	 * connections after each CIS Established event.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2566:
	 *
	 * If the Host issues this command before all the
	 * HCI_LE_CIS_Established events from the previous use of the
	 * command have been generated, the Controller shall return the
	 * error code Command Disallowed (0x0C).
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2567:
	 *
	 * When the Controller receives the HCI_LE_Create_CIS command, the
	 * Controller sends the HCI_Command_Status event to the Host. An
	 * HCI_LE_CIS_Established event will be generated for each CIS when it
	 * is established or if it is disconnected or considered lost before
	 * being established; until all the events are generated, the command
	 * remains pending.
	 */

	hci_dev_lock(hdev);

	rcu_read_lock();

	/* Wait until previous Create CIS has completed */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			goto done;
	}

	/* Find CIG with all CIS ready */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_conn *link;

		if (hci_conn_check_create_cis(conn))
			continue;

		cig = conn->iso_qos.ucast.cig;

		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
			if (hci_conn_check_create_cis(link) > 0 &&
			    link->iso_qos.ucast.cig == cig &&
			    link->state != BT_CONNECTED) {
				cig = BT_ISO_QOS_CIG_UNSET;
				break;
			}
		}

		if (cig != BT_ISO_QOS_CIG_UNSET)
			break;
	}

	if (cig == BT_ISO_QOS_CIG_UNSET)
		goto done;

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd->cis[aux_num_cis];

		if (hci_conn_check_create_cis(conn) ||
		    conn->iso_qos.ucast.cig != cig)
			continue;

		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
		cis->acl_handle = cpu_to_le16(conn->parent->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		aux_num_cis++;

		if (aux_num_cis >= cmd->num_cis)
			break;
	}
	cmd->num_cis = aux_num_cis;

done:
	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!aux_num_cis)
		return 0;

	/* Wait for HCI_LE_CIS_Established */
	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
					struct_size(cmd, cis, cmd->num_cis),
					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
					conn->conn_timeout, NULL);
}

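/* Note: DEFINE_FLEX (from <linux/overflow.h>) declares the command on the
 * stack with room for up to 0x1f hci_cis entries, 0x1f being the largest
 * Num_CIS the command format allows, and initializes num_cis to that
 * maximum; the fill loop above then shrinks num_cis to the number of
 * entries actually populated before struct_size() computes the final
 * payload length.
 */
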
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the Controller supports LL Privacy use own address
		 * type 0x03.
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy, use the current address */
	hci_copy_identity_address(hdev, rand_addr, own_addr_type);

	return 0;
}

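/* Note: bdaddr_t stores the address little-endian, so b[5] holds the two
 * most significant bits of the address. Clearing them (nrpa.b[5] &= 0x3f)
 * yields the 00 sub-type that marks a random address as a non-resolvable
 * private address, as opposed to 01 for resolvable and 11 for static
 * random addresses.
 */
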
static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}

static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* Many controllers disallow HCI Create Connection while the
	 * controller is doing HCI Inquiry. So we cancel the Inquiry first
	 * before issuing HCI Create Connection. This may cause the MGMT
	 * discovering state to become false without user space's request,
	 * but it is okay since the MGMT Discovery APIs do not promise that
	 * discovery should be done forever. Instead, user space monitors
	 * the status of MGMT discovering and may request discovery again
	 * when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
					    NULL, HCI_CMD_TIMEOUT);
		if (err)
			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
					sizeof(cp), &cp,
					HCI_EV_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
						  conn, create_le_conn_complete);
	}

	return -ENOENT;
}

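/* Note on the return convention above: hci_cmd_sync_dequeue_once returns
 * true when the queued request was found and removed, so the negation
 * yields 0 when the pending connect was cancelled before running, and a
 * non-zero value when it had already been dequeued for execution and can
 * no longer be cancelled here.
 */
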
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
			    struct hci_conn_params *params)
{
	struct hci_cp_le_conn_update cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
	cp.conn_latency = cpu_to_le16(params->conn_latency);
	cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	if (!err)
		goto unlock;

	/* Add connection to indicate PA sync error */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, bt_status(err));

unlock:
	hci_dev_unlock(hdev);
}

static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync cp;
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID)
		return -EINVAL;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	/* If the SID has not been set and active scanning is enabled, stop
	 * scanning so that passive scanning is used instead, with the allow
	 * list programmed to contain only the connection address.
	 */
	if (conn->sid == HCI_SID_INVALID &&
	    hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
	 * program the address in the allow list so PA advertisements can be
	 * received.
	 */
	set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	hci_update_passive_scan_sync(hdev);

	/* If the SID has not been set, listen for HCI_EV_LE_EXT_ADV_REPORT
	 * to update it.
	 */
	if (conn->sid == HCI_SID_INVALID) {
		err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
					       HCI_EV_LE_EXT_ADV_REPORT,
					       conn->conn_timeout, NULL);
		if (err == -ETIMEDOUT)
			goto done;
	}

	memset(&cp, 0, sizeof(cp));
	cp.options = qos->bcast.options;
	cp.sid = conn->sid;
	cp.addr_type = conn->dst_type;
	bacpy(&cp.addr, &conn->dst);
	cp.skip = cpu_to_le16(qos->bcast.skip);
	cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp.sync_cte_type = qos->bcast.sync_cte_type;

	/* The spec allows only one pending LE Periodic Advertising Create
	 * Sync command at a time so we forcefully wait for PA Sync Established
	 * event since cmd_work can only schedule one command at a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2493:
	 *
	 * If the Host issues this command when another HCI_LE_Periodic_
	 * Advertising_Create_Sync command is pending, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				       sizeof(cp), &cp,
				       HCI_EV_LE_PA_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
				      0, NULL, HCI_CMD_TIMEOUT);

done:
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Update passive scan since the HCI_PA_SYNC flag has been cleared */
	hci_update_passive_scan_sync(hdev);

	return err;
}

int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
				       create_pa_complete);
}

static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
}

static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
{
	DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	memset(cp, 0, sizeof(*cp));
	cp->handle = qos->bcast.big;
	cp->sync_handle = cpu_to_le16(conn->sync_handle);
	cp->encryption = qos->bcast.encryption;
	memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
	cp->mse = qos->bcast.mse;
	cp->timeout = cpu_to_le16(qos->bcast.timeout);
	cp->num_bis = conn->num_bis;
	memcpy(cp->bis, conn->bis, conn->num_bis);

	/* The spec allows only one pending LE BIG Create Sync command at
	 * a time, so we forcefully wait for BIG Sync Established event since
	 * cmd_work can only schedule one command at a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2586:
	 *
	 * If the Host sends this command when the Controller is in the
	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
	 * Established event has not been generated, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
				       struct_size(cp, bis, cp->num_bis), cp,
				       HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		hci_le_big_terminate_sync(hdev, cp->handle);

	return err;
}

int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
				       create_big_complete);
}
