/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

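/* Pull helpers that detach a parameter block of the expected length from
 * an event skb; they log and return NULL when the packet is shorter than
 * advertised, so callers can simply check for NULL.
 */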
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}

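/* hci_cc_* handlers parse Command Complete events. Each returns the HCI
 * status to be propagated to the command's completion callback, with
 * rp->status holding the status byte reported by the controller.
 */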
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed. This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_remote_name_req_cancel *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

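/* HCI_Reset completed: drop all volatile flags and cached controller
 * state so host and controller start over from a clean slate.
 */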
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

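/* Sanity-check the encryption key size of a connection. A failed read or
 * a key size below the acceptable minimum is treated as an authentication
 * failure so that the link does not stay up under-encrypted.
 */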
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Check if the key size is too small or if it has been
		 * downgraded from the value last stored as part of the
		 * link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* In the slave role, conn->state has already been set
			 * to BT_CONNECTED and the L2CAP connection request
			 * might not have been received yet; at this moment the
			 * L2CAP layer does almost nothing with the non-zero
			 * status.
			 * So also clear the encryption-related bits; the
			 * handler of the L2CAP connection request will then
			 * see the right security state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

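/* Derive the supported ACL/SCO packet types and eSCO modes from the LMP
 * feature bits reported by the controller.
 */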
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

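/* Record the controller's ACL/SCO buffer geometry; acl_cnt and sco_cnt
 * seed the credit counters used for host-side packet flow control.
 */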
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

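/* Track the random address programmed into the controller; if it matches
 * the current RPA, re-arm the RPA expiry timer.
 */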
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only adv instances other than handle 0x00, since that
	 * handle shall use HCI_OP_LE_SET_RANDOM_ADDR, which covers both
	 * extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

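/* Extended advertising enable/disable: keep each adv instance's enabled
 * state in sync and only clear HCI_LE_ADV once no instance remains
 * enabled.
 */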
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if any
			 * other instances are enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

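/* During an active LE scan the last advertising report is cached so it
 * can be merged with the matching scan response before being reported as
 * a single device-found event.
 */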
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

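/* Common completion handling for the legacy and extended variants of the
 * LE scan enable commands.
 */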
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}

2229
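/* Editorial note: the hci_cs_* handlers below process HCI Command Status
 * events. Unlike the hci_cc_* Command Complete handlers above, they receive
 * only the status byte: on success (status == 0) most of them return early
 * and let the corresponding completion event drive the state machine, while
 * on failure they tear down the connection state that the command would
 * otherwise have established.
 */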
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH, or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

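/* Editorial note on the name-resolution machinery below: entries in the
 * discovery resolve list move from NAME_NEEDED to NAME_PENDING when a
 * Remote Name Request is issued (hci_resolve_name), and then to NAME_KNOWN
 * or NAME_NOT_KNOWN once the Remote Name Request Complete event arrives
 * (hci_check_pending_name). hci_resolve_next_name() keeps draining the
 * queue until it is empty or name_resolve_timeout is exceeded.
 */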
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names. */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names are
	 * pending, there is no need to continue resolving the next name, as
	 * that will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}

static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

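/* Editorial note: Command Status handler for HCI_OP_DISCONNECT. In the
 * common case the actual cleanup is deferred to the HCI_EV_DISCONN_COMPLETE
 * handler; this path only acts when the command failed or the controller is
 * suspended and the completion event may never arrive.
 */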
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and the device
	 * is not suspended; otherwise clean up the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	/* During suspend, mark connection as closed immediately
	 * since we might not receive HCI_EV_DISCONN_COMPLETE.
	 */
	if (hdev->suspended)
		conn->state = BT_CLOSED;

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer does
	 * not retry disconnecting in the current implementation. Hence, we
	 * need to do some basic cleanup here and re-enable advertising if
	 * necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}

static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
{
	/* When using controller based address resolution, the new address
	 * types 0x02 and 0x03 are used. These types need to be converted
	 * back into either public address or random address type.
	 */
	switch (type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_PUBLIC;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_RANDOM;
	}

	if (resolved)
		*resolved = false;
	return type;
}

static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

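/* Editorial note: Inquiry Complete drives the discovery state machine
 * forward. If devices still need their names resolved, the state moves to
 * DISCOVERY_RESOLVING; otherwise it moves to DISCOVERY_STOPPED, unless a
 * simultaneous LE scan is still running on controllers with
 * HCI_QUIRK_SIMULTANEOUS_DISCOVERY.
 */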
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}

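/* Editorial note: issue HCI_OP_READ_ENC_KEY_SIZE for an encrypted
 * connection. A provisional enc_key_size is set right away so that checks
 * such as l2cap_check_enc_key_size() don't fail while the Command Complete
 * response is still in flight.
 */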
static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct hci_cp_read_enc_key_size cp;
	u8 *key_enc_size = hci_conn_key_enc_size(conn);

	if (!read_key_size_capable(hdev)) {
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
		return -EOPNOTSUPP;
	}

	bt_dev_dbg(hdev, "hcon %p", conn);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);

	/* If the key enc_size is already known, use it as conn->enc_key_size,
	 * otherwise use hdev->min_enc_key_size so the likes of
	 * l2cap_check_enc_key_size don't fail while waiting for
	 * the HCI_OP_READ_ENC_KEY_SIZE response.
	 */
	if (key_enc_size && *key_enc_size)
		conn->enc_key_size = *key_enc_size;
	else
		conn->enc_key_size = hdev->min_enc_key_size;

	return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
}

static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist, then it must be the receiver/slave role.
	 * 2. If it does exist, confirm that it is connecting/BT_CONNECT in
	 *    case of the initiator/master role, since there could be a
	 *    collision where either side is attempting to connect, or
	 *    something like fuzz testing is trying to play tricks to destroy
	 *    the hcon object before it even attempts to connect (e.g.
	 *    hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to clean up.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (IS_ERR(conn)) {
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* The "Link Key Request" exchange may have completed ahead of
		 * the "Connect Request" completing.
		 */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
				hci_read_enc_key_size(hdev, conn);
				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}

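/* Editorial note on incoming connection handling: hci_conn_request_evt()
 * below either rejects the request (hci_reject_conn), accepts it right away
 * with HCI_OP_ACCEPT_CONN_REQ or HCI_OP_ACCEPT_SYNC_CONN_REQ, or defers the
 * decision to the upper layer when HCI_PROTO_DEFER is set.
 */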
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}

static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}

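/* Editorial note: translate an HCI disconnect reason code into the coarser
 * MGMT_DEV_DISCONN_* categories reported to userspace over the management
 * interface.
 */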
static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-enable passive scanning if disconnected device is marked
	 * as auto-connectable.
	 */
	if (conn->type == LE_LINK) {
		params = hci_conn_params_lookup(hdev, &conn->dst,
						conn->dst_type);
		if (params) {
			switch (params->auto_connect) {
			case HCI_AUTO_CONN_LINK_LOSS:
				if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
					break;
				fallthrough;

			case HCI_AUTO_CONN_DIRECT:
			case HCI_AUTO_CONN_ALWAYS:
				hci_pend_le_list_del_init(params);
				hci_pend_le_list_add(params,
						     &hdev->pend_le_conns);
				hci_update_passive_scan(hdev);
				break;

			default:
				break;
			}
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can clean up before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		if (hci_read_enc_key_size(hdev, conn))
			goto notify;

		goto unlock;
	}

	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
	 * to avoid unexpected SMP command errors when pairing.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
		goto notify;

	/* Set the default Authenticated Payload Timeout after an LE link is
	 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3, the
	 * HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be sent when the link
	 * is active and encryption is enabled; the conn type can be either LE
	 * or ACL, and the controller must support LMP Ping. Ensure this for
	 * AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

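/* Editorial note: ncmd below is the Num_HCI_Command_Packets field from the
 * event header. A non-zero value means the controller can accept commands
 * again, so the command counter is reset; a zero value would stall the
 * command queue, so the ncmd_timer is armed as a safeguard against
 * controllers that never report a non-zero value.
 */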
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	rcu_read_lock();
	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
						   HCI_NCMD_TIMEOUT);
		}
	}
	rcu_read_unlock();
}

static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size_v2 *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->le_pkts = rp->acl_max_pkt;
	hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
	hdev->iso_pkts = rp->iso_max_pkt;

	hdev->le_cnt = hdev->le_pkts;
	hdev->iso_cnt = hdev->iso_pkts;

	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

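/* Editorial note: fail every CIS connection in the given CIG that was never
 * assigned a connection handle. Must be called with hdev->lock held (see
 * the lockdep_assert_held below); in this file it is used by the failure
 * path of hci_cc_le_set_cig_params().
 */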
static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
{
	struct hci_conn *conn, *tmp;

	lockdep_assert_held(&hdev->lock);

	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
		if (conn->type != CIS_LINK ||
		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
			continue;

		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, status);
	}
}

static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm the connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm the connection since conn->iso_qos is always
		 * configured last.
		 */
		hci_connect_cfm(conn, rp->status);

		/* Notify device connected in case it is a BIG Sync */
		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
			mgmt_device_connected(hdev, conn, NULL, 0);

		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}

static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_params *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
	if (!cp)
		return rp->status;

	/* TODO: set the conn state */
	return rp->status;
}

static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled, check if there is
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))

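/* Editorial note: table-driven dispatch for Command Complete events. Each
 * entry binds an opcode to its handler together with the minimum and
 * maximum response payload length that hci_cc_func() will accept. As a
 * rough sketch of how an entry is consumed (illustrative only; the real
 * lookup lives in hci_cmd_complete_evt further below):
 *
 *	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
 *		if (hci_cc_table[i].op == opcode) {
 *			status = hci_cc_func(hdev, &hci_cc_table[i], skb);
 *			break;
 *		}
 *	}
 */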
static const struct hci_cc {
4002
u16 op;
4003
u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4004
u16 min_len;
4005
u16 max_len;
4006
} hci_cc_table[] = {
4007
HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4008
HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4009
HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4010
HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
4011
sizeof(struct hci_rp_remote_name_req_cancel)),
4012
HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4013
sizeof(struct hci_rp_role_discovery)),
4014
HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4015
sizeof(struct hci_rp_read_link_policy)),
4016
HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4017
sizeof(struct hci_rp_write_link_policy)),
4018
HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4019
sizeof(struct hci_rp_read_def_link_policy)),
4020
HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4021
hci_cc_write_def_link_policy),
4022
HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4023
HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4024
sizeof(struct hci_rp_read_stored_link_key)),
4025
HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4026
sizeof(struct hci_rp_delete_stored_link_key)),
4027
HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4028
HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4029
sizeof(struct hci_rp_read_local_name)),
4030
HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4031
HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4032
HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4033
HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4034
HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4035
sizeof(struct hci_rp_read_class_of_dev)),
4036
HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4037
HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4038
sizeof(struct hci_rp_read_voice_setting)),
4039
HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4040
HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4041
sizeof(struct hci_rp_read_num_supported_iac)),
4042
HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4043
HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4044
HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4045
sizeof(struct hci_rp_read_auth_payload_to)),
4046
HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4047
sizeof(struct hci_rp_write_auth_payload_to)),
4048
HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4049
sizeof(struct hci_rp_read_local_version)),
4050
HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4051
sizeof(struct hci_rp_read_local_commands)),
4052
HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4053
sizeof(struct hci_rp_read_local_features)),
4054
HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4055
sizeof(struct hci_rp_read_local_ext_features)),
4056
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};

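/* Run a Command Complete handler from hci_cc_table: verify that the reply
 * is at least min_len bytes, warn (but continue) when it exceeds max_len,
 * and hand the pulled payload to the per-opcode callback.
 */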
static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the cc, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}

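/* Handle the HCI Command Complete event: look up the opcode in
 * hci_cc_table, run the matching handler to derive the command status and
 * wake up the command queue once the pending request has been completed.
 */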
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands sent by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

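/* Handle the command status of HCI_OP_LE_CREATE_CIS: on failure, tear down
 * every connection referenced by the pending command and restart any other
 * CIS creation that was waiting on it.
 */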
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; i < cp->num_cis; i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}
	cp->num_cis = 0;

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}

#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

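/* Each entry pairs a command opcode with its status handler; for example,
 * HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry) expands to
 * { .op = HCI_OP_INQUIRY, .func = hci_cs_inquiry }.
 */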
static const struct hci_cs {
	u16 op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};

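/* Handle the HCI Command Status event: run the opcode's handler from
 * hci_cs_table and flag the pending request as complete when the command
 * failed or when no further event is expected for it.
 */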
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of command there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}

static void hci_role_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

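/* Handle the Number Of Completed Packets event: credit the per-link-type
 * flow control counters and decrease each connection's sent counter so
 * that queued TX work can resume.
 */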
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;
		unsigned int j;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		/* Check that there really are enough packets outstanding
		 * before attempting to decrease the sent counter, otherwise
		 * it could underflow.
		 */
		if (conn->sent >= count) {
			conn->sent -= count;
		} else {
			bt_dev_warn(hdev, "hcon %p sent %u < count %u",
				    conn, conn->sent, count);
			conn->sent = 0;
		}

		for (j = 0; j < count; ++j)
			hci_conn_tx_dequeue(conn);

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
		case ESCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case CIS_LINK:
		case BIS_LINK:
		case PA_LINK:
			hdev->iso_cnt += count;
			if (hdev->iso_cnt > hdev->iso_pkts)
				hdev->iso_cnt = hdev->iso_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);

	hci_dev_unlock(hdev);
}

static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

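/* Record the negotiated link key type on the connection and derive the
 * pending security level from it: a 16-digit PIN or an authenticated
 * combination key yields high (or FIPS for P-256) security, anything else
 * medium.
 */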
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}

static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

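/* Handle the Link Key Notification event: reject an all-zero link key
 * (CVE-2020-26555 mitigation), store the new key, and decide from the key
 * type and bonding state whether it should persist across disconnects.
 */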
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = data;
	struct inquiry_entry *ie;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

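/* Handle the Inquiry Result with RSSI event. Controllers report one of
 * two layouts (with or without the pscan_mode byte), so the payload size
 * is used to pick the right structure before updating the inquiry cache.
 */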
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support.
			 */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state = BT_CONNECTED;
		conn->type = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1e: /* Invalid LMP Parameters */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only for SCO over the HCI transport data path, which is
	 * indicated by a data path value of zero; a non-zero value means a
	 * non-HCI transport data path.
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

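/* Walk the EIR data, which is a sequence of length-prefixed fields where
 * the length byte covers the type byte plus the field data, and return
 * the number of meaningful bytes; a zero length byte ends the data early.
 */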
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}

static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

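/* Compute the authentication requirements to send in an IO Capability
 * Request Reply, honouring a remote no-bonding request and requiring MITM
 * protection only when both sides have the IO capabilities for it.
 */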
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

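/* Report which out-of-band pairing data is available for the remote
 * device: 0x00 for none, 0x01 for P-192 data and 0x02 for P-256 data,
 * taking the Secure Connections (Only) settings into account.
 */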
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connections Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check whether
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}

static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're bondable, if we are the initiator of
	 * the pairing, or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by the BT spec.
		 */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

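/* Handle the User Confirmation Request event: reject when MITM protection
 * is required but impossible, auto-accept (or just-works confirm) when
 * neither side needs MITM, and otherwise defer the decision to user space
 * via mgmt.
 */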
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If neither side requires MITM protection, use the JUST_CFM method */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiator of the authorization request and
		 * the local IO capability is not NoInputNoOutput, use the
		 * JUST_WORKS method (mgmt_user_confirm with confirm_hint set
		 * to 1).
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If a link key already exists in the local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = data;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event always gets produced as initiator and is also mapped to
	 * the mgmt_auth_failed event.
	 */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

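/* Fill in the initiator and responder addresses of a new LE connection,
 * preferring a controller-provided Local RPA over hdev->rpa, and seed the
 * default connection interval bounds for incoming connections.
 */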
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}

static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist then use the role to create a new object.
	 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
	 *    of initiator/master role since there could be a collision where
	 *    either side is attempting to connect or something like a fuzzing
	 *    test is trying to play tricks to destroy the hcon object before
	 *    it even attempts to connect (e.g. hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     NULL, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type, u8 phy, u8 sec_phy)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral devices
			 * are accepted and also outgoing connections to
			 * peripheral devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER, phy, sec_phy);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by the higher layer that tried to connect; if not, then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at a time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}

static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6018
u8 bdaddr_type, bdaddr_t *direct_addr,
6019
u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6020
u8 *data, u8 len, bool ext_adv, bool ctl_time,
6021
u64 instant)
6022
{
6023
struct discovery_state *d = &hdev->discovery;
6024
struct smp_irk *irk;
6025
struct hci_conn *conn;
6026
bool match, bdaddr_resolved;
6027
u32 flags;
6028
u8 *ptr;
6029
6030
switch (type) {
6031
case LE_ADV_IND:
6032
case LE_ADV_DIRECT_IND:
6033
case LE_ADV_SCAN_IND:
6034
case LE_ADV_NONCONN_IND:
6035
case LE_ADV_SCAN_RSP:
6036
break;
6037
default:
6038
bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6039
"type: 0x%02x", type);
6040
return;
6041
}
6042
6043
if (len > max_adv_len(hdev)) {
6044
bt_dev_err_ratelimited(hdev,
6045
"adv larger than maximum supported");
6046
return;
6047
}
6048
6049
/* Find the end of the data in case the report contains padded zero
6050
* bytes at the end causing an invalid length value.
6051
*
6052
* When data is NULL, len is 0 so there is no need for extra ptr
6053
* check as 'ptr < data + 0' is already false in such case.
6054
*/
6055
for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6056
if (ptr + 1 + *ptr > data + len)
6057
break;
6058
}
6059
6060
/* Adjust for actual length. This handles the case when remote
6061
* device is advertising with incorrect data length.
6062
*/
6063
len = ptr - data;
6064
6065
/* If the direct address is present, then this report is from
6066
* a LE Direct Advertising Report event. In that case it is
6067
* important to see if the address is matching the local
6068
* controller address.
6069
*
6070
* If local privacy is not enable the controller shall not be
6071
* generating such event since according to its documentation it is only
6072
* valid for filter_policy 0x02 and 0x03, but the fact that it did
6073
* generate LE Direct Advertising Report means it is probably broken and
6074
* won't generate any other event which can potentially break
6075
* auto-connect logic so in case local privacy is not enable this
6076
* ignores the direct_addr so it works as a regular report.
6077
*/
6078
if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6079
hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6080
direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6081
&bdaddr_resolved);
6082
6083
/* Only resolvable random addresses are valid for these
6084
* kind of reports and others can be ignored.
6085
*/
6086
if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6087
return;
6088
6089
/* If the local IRK of the controller does not match
6090
* with the resolvable random address provided, then
6091
* this report can be ignored.
6092
*/
6093
if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6094
return;
6095
}
6096
6097
/* Check if we need to convert to identity address */
6098
irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6099
if (irk) {
6100
bdaddr = &irk->bdaddr;
6101
bdaddr_type = irk->addr_type;
6102
}
6103
6104
bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6105
6106
/* Check if we have been requested to connect to this device.
6107
*
6108
* direct_addr is set only for directed advertising reports (it is NULL
6109
* for advertising reports) and is already verified to be RPA above.
6110
*/
6111
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6112
type, phy, sec_phy);
6113
if (!ext_adv && conn && type == LE_ADV_IND &&
6114
len <= max_adv_len(hdev)) {
6115
/* Store report for later inclusion by
6116
* mgmt_device_connected
6117
*/
6118
memcpy(conn->le_adv_data, data, len);
6119
conn->le_adv_data_len = len;
6120
}
6121
6122
if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6123
flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6124
else
6125
flags = 0;
6126
6127
/* All scan results should be sent up for Mesh systems */
6128
if (hci_dev_test_flag(hdev, HCI_MESH)) {
6129
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6130
rssi, flags, data, len, NULL, 0, instant);
6131
return;
6132
}
6133
6134
/* Passive scanning shouldn't trigger any device found events,
6135
* except for devices marked as CONN_REPORT for which we do send
6136
* device found events, or advertisement monitoring requested.
6137
*/
6138
if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6139
if (type == LE_ADV_DIRECT_IND)
6140
return;
6141
6142
if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6143
bdaddr, bdaddr_type) &&
6144
idr_is_empty(&hdev->adv_monitors_idr))
6145
return;
6146
6147
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6148
rssi, flags, data, len, NULL, 0, 0);
6149
return;
6150
}
6151
6152
/* When receiving a scan response, then there is no way to
6153
* know if the remote device is connectable or not. However
6154
* since scan responses are merged with a previously seen
6155
* advertising report, the flags field from that report
6156
* will be used.
6157
*
6158
* In the unlikely case that a controller just sends a scan
6159
* response event that doesn't match the pending report, then
6160
* it is marked as a standalone SCAN_RSP.
6161
*/
6162
if (type == LE_ADV_SCAN_RSP)
6163
flags = MGMT_DEV_FOUND_SCAN_RSP;
6164
6165
/* If there's nothing pending either store the data from this
6166
* event or send an immediate device found event if the data
6167
* should not be stored for later.
6168
*/
6169
if (!has_pending_adv_report(hdev)) {
6170
/* If the report will trigger a SCAN_REQ store it for
6171
* later merging.
6172
*/
6173
if (!ext_adv && (type == LE_ADV_IND ||
6174
type == LE_ADV_SCAN_IND)) {
6175
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6176
rssi, flags, data, len);
6177
return;
6178
}
6179
6180
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6181
rssi, flags, data, len, NULL, 0, 0);
6182
return;
6183
}
6184
6185
/* Check if the pending report is for the same device as the new one */
6186
match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6187
bdaddr_type == d->last_adv_addr_type);
6188
6189
/* If the pending data doesn't match this report or this isn't a
6190
* scan response (e.g. we got a duplicate ADV_IND) then force
6191
* sending of the pending data.
6192
*/
6193
if (type != LE_ADV_SCAN_RSP || !match) {
6194
/* Send out whatever is in the cache, but skip duplicates */
6195
if (!match)
6196
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6197
d->last_adv_addr_type, NULL,
6198
d->last_adv_rssi, d->last_adv_flags,
6199
d->last_adv_data,
6200
d->last_adv_data_len, NULL, 0, 0);
6201
6202
/* If the new report will trigger a SCAN_REQ store it for
6203
* later merging.
6204
*/
6205
if (!ext_adv && (type == LE_ADV_IND ||
6206
type == LE_ADV_SCAN_IND)) {
6207
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6208
rssi, flags, data, len);
6209
return;
6210
}
6211
6212
/* The advertising reports cannot be merged, so clear
6213
* the pending report and send out a device found event.
6214
*/
6215
clear_pending_adv_report(hdev);
6216
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6217
rssi, flags, data, len, NULL, 0, 0);
6218
return;
6219
}
6220
6221
/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6222
* the new event is a SCAN_RSP. We can therefore proceed with
6223
* sending a merged device found event.
6224
*/
6225
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6226
d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6227
d->last_adv_data, d->last_adv_data_len, data, len, 0);
6228
clear_pending_adv_report(hdev);
6229
}
6230
6231
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6232
struct sk_buff *skb)
6233
{
6234
struct hci_ev_le_advertising_report *ev = data;
6235
u64 instant = jiffies;
6236
6237
if (!ev->num)
6238
return;
6239
6240
hci_dev_lock(hdev);
6241
6242
while (ev->num--) {
6243
struct hci_ev_le_advertising_info *info;
6244
s8 rssi;
6245
6246
info = hci_le_ev_skb_pull(hdev, skb,
6247
HCI_EV_LE_ADVERTISING_REPORT,
6248
sizeof(*info));
6249
if (!info)
6250
break;
6251
6252
if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6253
info->length + 1))
6254
break;
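
/* A single RSSI octet trails the advertising data, which is why
 * info->length + 1 bytes were pulled off the skb above.
 */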
if (info->length <= max_adv_len(hdev)) {
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
HCI_ADV_PHY_1M, 0, rssi,
info->data, info->length, false,
false, instant);
} else {
bt_dev_err(hdev, "Dropping invalid advertising data");
}
}

hci_dev_unlock(hdev);
}
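
/* Map an extended advertising event type bitfield onto the legacy PDU
 * types that process_adv_report() understands.
 */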
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;

if (!pdu_type)
return LE_ADV_NONCONN_IND;

if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
switch (evt_type) {
case LE_LEGACY_ADV_IND:
return LE_ADV_IND;
case LE_LEGACY_ADV_DIRECT_IND:
return LE_ADV_DIRECT_IND;
case LE_LEGACY_ADV_SCAN_IND:
return LE_ADV_SCAN_IND;
case LE_LEGACY_NONCONN_IND:
return LE_ADV_NONCONN_IND;
case LE_LEGACY_SCAN_RSP_ADV:
case LE_LEGACY_SCAN_RSP_ADV_SCAN:
return LE_ADV_SCAN_RSP;
}

goto invalid;
}

if (evt_type & LE_EXT_ADV_CONN_IND) {
if (evt_type & LE_EXT_ADV_DIRECT_IND)
return LE_ADV_DIRECT_IND;

return LE_ADV_IND;
}

if (evt_type & LE_EXT_ADV_SCAN_RSP)
return LE_ADV_SCAN_RSP;

if (evt_type & LE_EXT_ADV_SCAN_IND)
return LE_ADV_SCAN_IND;

if (evt_type & LE_EXT_ADV_DIRECT_IND)
return LE_ADV_NONCONN_IND;

invalid:
bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
evt_type);

return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_ext_adv_report *ev = data;
u64 instant = jiffies;

if (!ev->num)
return;

hci_dev_lock(hdev);

while (ev->num--) {
struct hci_ev_le_ext_adv_info *info;
u8 legacy_evt_type;
u16 evt_type;

info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
sizeof(*info));
if (!info)
break;

if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
info->length))
break;

evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);

if (hci_test_quirk(hdev,
HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
info->primary_phy &= 0x1f;
info->secondary_phy &= 0x1f;
}

/* Check if PA Sync is pending and, if the hci_conn SID has not
* been set, update it.
*/
if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
struct hci_conn *conn;

conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (conn && conn->sid == HCI_SID_INVALID)
conn->sid = info->sid;
}

if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
info->primary_phy,
info->secondary_phy,
info->rssi, info->data, info->length,
!(evt_type & LE_EXT_ADV_LEGACY_PDU),
false, instant);
}
}

hci_dev_unlock(hdev);
}

static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
struct hci_cp_le_pa_term_sync cp;

memset(&cp, 0, sizeof(cp));
cp.handle = handle;

return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}
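
/* Periodic Advertising Sync Established: match the event against the
 * pending PA-sync hci_conn, terminate the sync if no listener accepts
 * it, and otherwise add a PA_LINK connection to report the sync when
 * the listener asked for deferred setup.
 */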
static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
struct hci_conn *pa_sync, *conn;

bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

hci_dev_lock(hdev);

hci_dev_clear_flag(hdev, HCI_PA_SYNC);

conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for dst %pMR sid 0x%2.2x",
&ev->bdaddr, ev->sid);
goto unlock;
}

clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

conn->sync_handle = le16_to_cpu(ev->handle);
conn->sid = HCI_SID_INVALID;

mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
&flags);
if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->handle);
goto unlock;
}

if (!(flags & HCI_PROTO_DEFER))
goto unlock;

/* Add connection to indicate PA sync event */
pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE);

if (IS_ERR(pa_sync))
goto unlock;

pa_sync->sync_handle = le16_to_cpu(ev->handle);

if (ev->status) {
set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

/* Notify iso layer */
hci_connect_cfm(pa_sync, ev->status);
}

unlock:
hci_dev_unlock(hdev);
}

static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_per_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
struct hci_conn *pa_sync;

bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

hci_dev_lock(hdev);

mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT))
goto unlock;

if (!(flags & HCI_PROTO_DEFER))
goto unlock;

pa_sync = hci_conn_hash_lookup_pa_sync_handle
(hdev,
le16_to_cpu(ev->sync_handle));

if (!pa_sync)
goto unlock;

if (ev->data_status == LE_PA_DATA_COMPLETE &&
!test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0);

/* Notify MGMT layer */
mgmt_device_connected(hdev, pa_sync, NULL, 0);
}

unlock:
hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_remote_feat_complete *ev = data;
struct hci_conn *conn;

bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
if (!ev->status)
memcpy(conn->features[0], ev->features, 8);

if (conn->state == BT_CONFIG) {
__u8 status;

/* If the local controller supports peripheral-initiated
* features exchange, but the remote controller does
* not, then it is possible that the error code 0x1a
* for unsupported remote feature gets returned.
*
* In this specific case, allow the connection to
* transition into connected state and mark it as
* successful.
*/
if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
(hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
status = ev->status;

conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
hci_conn_drop(conn);
}
}

hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = data;
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
struct smp_ltk *ltk;

bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn == NULL)
goto not_found;

ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
if (!ltk)
goto not_found;

if (smp_ltk_is_sc(ltk)) {
/* With SC both EDiv and Rand are set to zero */
if (ev->ediv || ev->rand)
goto not_found;
} else {
/* For non-SC keys check that EDiv and Rand match */
if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
goto not_found;
}

memcpy(cp.ltk, ltk->val, ltk->enc_size);
memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
cp.handle = cpu_to_le16(conn->handle);

conn->pending_sec_level = smp_ltk_sec_level(ltk);

conn->enc_key_size = ltk->enc_size;

hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
* temporary key used to encrypt a connection following
* pairing. It is used during the Encrypted Session Setup to
* distribute the keys. Later, security can be re-established
* using a distributed LTK.
*/
if (ltk->type == SMP_STK) {
set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
list_del_rcu(&ltk->list);
kfree_rcu(ltk, rcu);
} else {
clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
}

hci_dev_unlock(hdev);

return;

not_found:
neg.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
u8 reason)
{
struct hci_cp_le_conn_param_req_neg_reply cp;

cp.handle = cpu_to_le16(handle);
cp.reason = reason;

hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
&cp);
}
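
/* Remote Connection Parameter Request: validate the proposed
 * parameters, reject them with a negative reply when they are out of
 * range, and otherwise echo them back in the reply. As central, also
 * store the values and hint userspace via mgmt_new_conn_param().
 */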
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_remote_conn_param_req *ev = data;
struct hci_cp_le_conn_param_req_reply cp;
struct hci_conn *hcon;
u16 handle, min, max, latency, timeout;

bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

handle = le16_to_cpu(ev->handle);
min = le16_to_cpu(ev->interval_min);
max = le16_to_cpu(ev->interval_max);
latency = le16_to_cpu(ev->latency);
timeout = le16_to_cpu(ev->timeout);

hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon || hcon->state != BT_CONNECTED)
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);

if (max > hcon->le_conn_max_interval)
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);

if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);

if (hcon->role == HCI_ROLE_MASTER) {
struct hci_conn_params *params;
u8 store_hint;

hci_dev_lock(hdev);

params = hci_conn_params_lookup(hdev, &hcon->dst,
hcon->dst_type);
if (params) {
params->conn_min_interval = min;
params->conn_max_interval = max;
params->conn_latency = latency;
params->supervision_timeout = timeout;
store_hint = 0x01;
} else {
store_hint = 0x00;
}

hci_dev_unlock(hdev);

mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
store_hint, min, max, latency, timeout);
}

cp.handle = ev->handle;
cp.interval_min = ev->interval_min;
cp.interval_max = ev->interval_max;
cp.latency = ev->latency;
cp.timeout = ev->timeout;
cp.min_ce_len = 0;
cp.max_ce_len = 0;

hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_direct_adv_report *ev = data;
u64 instant = jiffies;
int i;

if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
flex_array_size(ev, info, ev->num)))
return;

if (!ev->num)
return;

hci_dev_lock(hdev);

for (i = 0; i < ev->num; i++) {
struct hci_ev_le_direct_adv_info *info = &ev->info[i];

process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, &info->direct_addr,
info->direct_addr_type, HCI_ADV_PHY_1M, 0,
info->rssi, NULL, 0, false, false, instant);
}

hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_phy_update_complete *ev = data;
struct hci_conn *conn;

bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

if (ev->status)
return;

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;

conn->le_tx_phy = ev->tx_phy;
conn->le_rx_phy = ev->rx_phy;

unlock:
hci_dev_unlock(hdev);
}

static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_cis_established *ev = data;
struct hci_conn *conn;
struct bt_iso_qos *qos;
bool pending = false;
u16 handle = __le16_to_cpu(ev->handle);
u32 c_sdu_interval, p_sdu_interval;

bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_handle(hdev, handle);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection with handle 0x%4.4x",
handle);
goto unlock;
}

if (conn->type != CIS_LINK) {
bt_dev_err(hdev,
"Invalid connection link type handle 0x%4.4x",
handle);
goto unlock;
}

qos = &conn->iso_qos;

pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
* page 3075:
* Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
* ISO_Interval + SDU_Interval_C_To_P
* ...
* SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
* Transport_Latency
*/
c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
(ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
get_unaligned_le24(ev->c_latency);
p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
(ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
get_unaligned_le24(ev->p_latency);
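
/* The c_* event fields describe the Central-to-Peripheral direction
 * and the p_* fields the reverse, so the in/out QoS mapping depends
 * on which role this connection has.
 */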
switch (conn->role) {
case HCI_ROLE_SLAVE:
qos->ucast.in.interval = c_sdu_interval;
qos->ucast.out.interval = p_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
1000);
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.in.phy = ev->c_phy;
qos->ucast.out.phy = ev->p_phy;
break;
case HCI_ROLE_MASTER:
qos->ucast.in.interval = p_sdu_interval;
qos->ucast.out.interval = c_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
1000);
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.out.phy = ev->c_phy;
qos->ucast.in.phy = ev->p_phy;
break;
}

if (!ev->status) {
conn->state = BT_CONNECTED;
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
hci_iso_setup_path(conn);
goto unlock;
}

conn->state = BT_CLOSED;
hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);

unlock:
if (pending)
hci_le_create_cis_pending(hdev);

hci_dev_unlock(hdev);
}

static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
{
struct hci_cp_le_reject_cis cp;

memset(&cp, 0, sizeof(cp));
cp.handle = handle;
cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
}

static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
{
struct hci_cp_le_accept_cis cp;

memset(&cp, 0, sizeof(cp));
cp.handle = handle;
hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
}
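
/* CIS Request (peripheral side): the incoming CIS is rejected when no
 * listener accepts it, accepted immediately otherwise, or left in
 * BT_CONNECT2 for userspace to accept when setup is deferred.
 */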
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_cis_req *ev = data;
u16 acl_handle, cis_handle;
struct hci_conn *acl, *cis;
int mask;
__u8 flags = 0;

acl_handle = __le16_to_cpu(ev->acl_handle);
cis_handle = __le16_to_cpu(ev->cis_handle);

bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
acl_handle, cis_handle, ev->cig_id, ev->cis_id);

hci_dev_lock(hdev);

acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
if (!acl)
goto unlock;

mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT)) {
hci_le_reject_cis(hdev, ev->cis_handle);
goto unlock;
}

cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
if (!cis) {
cis = hci_conn_add(hdev, CIS_LINK, &acl->dst,
HCI_ROLE_SLAVE, cis_handle);
if (IS_ERR(cis)) {
hci_le_reject_cis(hdev, ev->cis_handle);
goto unlock;
}
}

cis->iso_qos.ucast.cig = ev->cig_id;
cis->iso_qos.ucast.cis = ev->cis_id;

if (!(flags & HCI_PROTO_DEFER)) {
hci_le_accept_cis(hdev, ev->cis_handle);
} else {
cis->state = BT_CONNECT2;
hci_connect_cfm(cis, 0);
}

unlock:
hci_dev_unlock(hdev);
}

static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
{
u8 handle = PTR_UINT(data);

return hci_le_terminate_big_sync(hdev, handle,
HCI_ERROR_LOCAL_HOST_TERM);
}
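
/* Create BIG Complete: bind the controller-assigned BIS handles, in
 * order, to the connections still bound to this BIG; if none are left
 * the BIG is terminated again.
 */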
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_create_big_complete *ev = data;
struct hci_conn *conn;
__u8 i = 0;

BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
flex_array_size(ev, bis_handle, ev->num_bis)))
return;

hci_dev_lock(hdev);

/* Connect all BISes that are bound to the BIG */
while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
BT_BOUND,
HCI_ROLE_MASTER))) {
if (ev->status) {
hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);
continue;
}

if (hci_conn_set_handle(conn,
__le16_to_cpu(ev->bis_handle[i++])))
continue;

conn->state = BT_CONNECTED;
set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
hci_iso_setup_path(conn);
}

if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
* have been closed before the BIG creation
* has completed.
*/
hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
UINT_PTR(ev->handle), NULL);

hci_dev_unlock(hdev);
}
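
/* BIG Sync Established: create or look up one hci_conn per BIS handle
 * reported by the controller and bring it up, or report the failure
 * to each of them once all connections have been added.
 */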
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_big_sync_established *ev = data;
struct hci_conn *bis, *conn;
int i;

bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
flex_array_size(ev, bis, ev->num_bis)))
return;

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
ev->num_bis);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for big 0x%2.2x",
ev->handle);
goto unlock;
}

clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

conn->num_bis = 0;
memset(conn->bis, 0, sizeof(conn->bis));

for (i = 0; i < ev->num_bis; i++) {
u16 handle = le16_to_cpu(ev->bis[i]);
__le32 interval;

bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis) {
if (handle > HCI_CONN_HANDLE_MAX) {
bt_dev_dbg(hdev, "ignore too large handle %u", handle);
continue;
}
bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE, handle);
if (IS_ERR(bis))
continue;
}

if (ev->status != 0x42) {
/* Mark PA sync as established */
set_bit(HCI_CONN_PA_SYNC, &bis->flags);
/* Reset cleanup callback of PA Sync so it doesn't
* terminate the sync when deleting the connection.
*/
conn->cleanup = NULL;
}

bis->sync_handle = conn->sync_handle;
bis->iso_qos.bcast.big = ev->handle;
memset(&interval, 0, sizeof(interval));
memcpy(&interval, ev->latency, sizeof(ev->latency));
bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

if (!ev->status) {
bis->state = BT_CONNECTED;
set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
hci_debugfs_create_conn(bis);
hci_conn_add_sysfs(bis);
hci_iso_setup_path(bis);
}
}

/* In case BIG sync failed, notify each failed connection to
* the user after all hci connections have been added
*/
if (ev->status)
for (i = 0; i < ev->num_bis; i++) {
u16 handle = le16_to_cpu(ev->bis[i]);

bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis)
continue;

set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
hci_connect_cfm(bis, ev->status);
}

unlock:
hci_dev_unlock(hdev);
}

static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_big_sync_lost *ev = data;
struct hci_conn *bis, *conn;
bool mgmt_conn;

bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);

hci_dev_lock(hdev);

/* Delete the pa sync connection */
bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
if (bis) {
conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
bis->sync_handle);
if (conn)
hci_conn_del(conn);
}

/* Delete each bis connection */
while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
BT_CONNECTED,
HCI_ROLE_SLAVE))) {
mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
ev->reason, mgmt_conn);

clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
hci_disconn_cfm(bis, ev->reason);
hci_conn_del(bis);
}

hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_big_info_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
struct hci_conn *pa_sync;

bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

hci_dev_lock(hdev);

mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT))
goto unlock;

if (!(flags & HCI_PROTO_DEFER))
goto unlock;

pa_sync = hci_conn_hash_lookup_pa_sync_handle
(hdev,
le16_to_cpu(ev->sync_handle));

if (!pa_sync)
goto unlock;

pa_sync->iso_qos.bcast.encryption = ev->encryption;

/* Notify iso layer */
hci_connect_cfm(pa_sync, 0);

unlock:
hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
.func = _func, \
.min_len = _min_len, \
.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
* opcode they handle. Using the macros above is recommended since they
* initialize each entry at its proper index using designated initializers,
* so events without a callback function can simply be omitted.
*/
static const struct hci_le_ev {
void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
u16 min_len;
u16 max_len;
} hci_le_ev_table[U8_MAX + 1] = {
/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
sizeof(struct hci_ev_le_conn_complete)),
/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
sizeof(struct hci_ev_le_advertising_report),
HCI_MAX_EVENT_SIZE),
/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
hci_le_conn_update_complete_evt,
sizeof(struct hci_ev_le_conn_update_complete)),
/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
hci_le_remote_feat_complete_evt,
sizeof(struct hci_ev_le_remote_feat_complete)),
/* [0x05 = HCI_EV_LE_LTK_REQ] */
HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
sizeof(struct hci_ev_le_ltk_req)),
/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
hci_le_remote_conn_param_req_evt,
sizeof(struct hci_ev_le_remote_conn_param_req)),
/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
hci_le_enh_conn_complete_evt,
sizeof(struct hci_ev_le_enh_conn_complete)),
/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
sizeof(struct hci_ev_le_direct_adv_report),
HCI_MAX_EVENT_SIZE),
/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
sizeof(struct hci_ev_le_phy_update_complete)),
/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
sizeof(struct hci_ev_le_ext_adv_report),
HCI_MAX_EVENT_SIZE),
/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
hci_le_pa_sync_established_evt,
sizeof(struct hci_ev_le_pa_sync_established)),
/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
hci_le_per_adv_report_evt,
sizeof(struct hci_ev_le_per_adv_report),
HCI_MAX_EVENT_SIZE),
/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
sizeof(struct hci_evt_le_ext_adv_set_term)),
/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
sizeof(struct hci_evt_le_cis_established)),
/* [0x1a = HCI_EVT_LE_CIS_REQ] */
HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
sizeof(struct hci_evt_le_cis_req)),
/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
hci_le_create_big_complete_evt,
sizeof(struct hci_evt_le_create_big_complete),
HCI_MAX_EVENT_SIZE),
/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
hci_le_big_sync_established_evt,
sizeof(struct hci_evt_le_big_sync_established),
HCI_MAX_EVENT_SIZE),
/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
hci_le_big_sync_lost_evt,
sizeof(struct hci_evt_le_big_sync_lost),
HCI_MAX_EVENT_SIZE),
/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
hci_le_big_info_adv_report_evt,
sizeof(struct hci_evt_le_big_info_adv_report),
HCI_MAX_EVENT_SIZE),
};
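
/* Dispatch an LE meta event to its subevent handler via
 * hci_le_ev_table, after completing any pending LE command request
 * that was waiting for this subevent.
 */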
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb, u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb)
{
struct hci_ev_le_meta *ev = data;
const struct hci_le_ev *subev;

bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

/* Only match event if command OGF is for LE */
if (hdev->req_skb &&
(hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
hci_skb_event(hdev->req_skb) == ev->subevent) {
*opcode = hci_skb_opcode(hdev->req_skb);
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
req_complete_skb);
}

subev = &hci_le_ev_table[ev->subevent];
if (!subev->func)
return;

if (skb->len < subev->min_len) {
bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
ev->subevent, skb->len, subev->min_len);
return;
}

/* Just warn if the length is over max_len; it may still be
* possible to partially parse the event, so leave it to the
* callback to decide whether that is acceptable.
*/
if (skb->len > subev->max_len)
bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
ev->subevent, skb->len, subev->max_len);
data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
if (!data)
return;

subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
u8 event, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev;
struct hci_event_hdr *hdr;

if (!skb)
return false;

hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
if (!hdr)
return false;

if (event) {
if (hdr->evt != event)
return false;
return true;
}

/* Check if request ended in Command Status - no way to retrieve
* any extra parameters in this case.
*/
if (hdr->evt == HCI_EV_CMD_STATUS)
return false;

if (hdr->evt != HCI_EV_CMD_COMPLETE) {
bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
hdr->evt);
return false;
}

ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
if (!ev)
return false;

if (opcode != __le16_to_cpu(ev->opcode)) {
BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
__le16_to_cpu(ev->opcode));
return false;
}

return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
struct sk_buff *skb)
{
struct hci_ev_le_advertising_info *adv;
struct hci_ev_le_direct_adv_info *direct_adv;
struct hci_ev_le_ext_adv_info *ext_adv;
const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
const struct hci_ev_conn_request *conn_request = (void *)skb->data;

hci_dev_lock(hdev);

/* If we are currently suspended and this is the first BT event seen,
* save the wake reason associated with the event.
*/
if (!hdev->suspended || hdev->wake_reason)
goto unlock;

/* Default to remote wake. Values for wake_reason are documented in the
* Bluez mgmt api docs.
*/
hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

/* Once configured for remote wakeup, we should only wake up for
* reconnections. It's useful to see which device is waking us up so
* keep track of the bdaddr of the connection event that woke us up.
*/
if (event == HCI_EV_CONN_REQUEST) {
bacpy(&hdev->wake_addr, &conn_request->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_CONN_COMPLETE) {
bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_LE_META) {
struct hci_ev_le_meta *le_ev = (void *)skb->data;
u8 subevent = le_ev->subevent;
u8 *ptr = &skb->data[sizeof(*le_ev)];
u8 num_reports = *ptr;

if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
num_reports) {
adv = (void *)(ptr + 1);
direct_adv = (void *)(ptr + 1);
ext_adv = (void *)(ptr + 1);

switch (subevent) {
case HCI_EV_LE_ADVERTISING_REPORT:
bacpy(&hdev->wake_addr, &adv->bdaddr);
hdev->wake_addr_type = adv->bdaddr_type;
break;
case HCI_EV_LE_DIRECT_ADV_REPORT:
bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
hdev->wake_addr_type = direct_adv->bdaddr_type;
break;
case HCI_EV_LE_EXT_ADV_REPORT:
bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
hdev->wake_addr_type = ext_adv->bdaddr_type;
break;
}
}
} else {
hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
}

unlock:
hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
.req = false, \
.func = _func, \
.min_len = _min_len, \
.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
.req = true, \
.func_req = _func, \
.min_len = _min_len, \
.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
* opcode they handle. Using the macros above is recommended since they
* initialize each entry at its proper index using designated initializers,
* so events without a callback function don't need to be entered.
*/
static const struct hci_ev {
bool req;
union {
void (*func)(struct hci_dev *hdev, void *data,
struct sk_buff *skb);
void (*func_req)(struct hci_dev *hdev, void *data,
struct sk_buff *skb, u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb);
};
u16 min_len;
u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
/* [0x02 = HCI_EV_INQUIRY_RESULT] */
HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
/* [0x03 = HCI_EV_CONN_COMPLETE] */
HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
sizeof(struct hci_ev_conn_complete)),
/* [0x04 = HCI_EV_CONN_REQUEST] */
HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
sizeof(struct hci_ev_conn_request)),
/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
sizeof(struct hci_ev_disconn_complete)),
/* [0x06 = HCI_EV_AUTH_COMPLETE] */
HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
sizeof(struct hci_ev_auth_complete)),
/* [0x07 = HCI_EV_REMOTE_NAME] */
HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
sizeof(struct hci_ev_remote_name)),
/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
sizeof(struct hci_ev_encrypt_change)),
/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
hci_change_link_key_complete_evt,
sizeof(struct hci_ev_change_link_key_complete)),
/* [0x0b = HCI_EV_REMOTE_FEATURES] */
HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
sizeof(struct hci_ev_remote_features)),
/* [0x0e = HCI_EV_CMD_COMPLETE] */
HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
/* [0x0f = HCI_EV_CMD_STATUS] */
HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
sizeof(struct hci_ev_cmd_status)),
/* [0x10 = HCI_EV_HARDWARE_ERROR] */
HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
sizeof(struct hci_ev_hardware_error)),
/* [0x12 = HCI_EV_ROLE_CHANGE] */
HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
sizeof(struct hci_ev_role_change)),
/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
/* [0x14 = HCI_EV_MODE_CHANGE] */
HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
sizeof(struct hci_ev_mode_change)),
/* [0x16 = HCI_EV_PIN_CODE_REQ] */
HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
sizeof(struct hci_ev_pin_code_req)),
/* [0x17 = HCI_EV_LINK_KEY_REQ] */
HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
sizeof(struct hci_ev_link_key_req)),
/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
sizeof(struct hci_ev_link_key_notify)),
/* [0x1c = HCI_EV_CLOCK_OFFSET] */
HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
sizeof(struct hci_ev_clock_offset)),
/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
sizeof(struct hci_ev_pkt_type_change)),
/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
sizeof(struct hci_ev_pscan_rep_mode)),
/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
hci_inquiry_result_with_rssi_evt,
sizeof(struct hci_ev_inquiry_result_rssi),
HCI_MAX_EVENT_SIZE),
/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
sizeof(struct hci_ev_remote_ext_features)),
/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
sizeof(struct hci_ev_sync_conn_complete)),
/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
hci_extended_inquiry_result_evt,
sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
sizeof(struct hci_ev_key_refresh_complete)),
/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
sizeof(struct hci_ev_io_capa_request)),
/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
sizeof(struct hci_ev_io_capa_reply)),
/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
sizeof(struct hci_ev_user_confirm_req)),
/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
sizeof(struct hci_ev_user_passkey_req)),
/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
sizeof(struct hci_ev_remote_oob_data_request)),
/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
sizeof(struct hci_ev_simple_pair_complete)),
/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
sizeof(struct hci_ev_user_passkey_notify)),
/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
sizeof(struct hci_ev_keypress_notify)),
/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
sizeof(struct hci_ev_remote_host_features)),
/* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
/* [0xff = HCI_EV_VENDOR] */
HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb)
{
const struct hci_ev *ev = &hci_ev_table[event];
void *data;

if (!ev->func)
return;

if (skb->len < ev->min_len) {
bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
event, skb->len, ev->min_len);
return;
}

/* Just warn if the length is over max_len; it may still be
* possible to partially parse the event, so leave it to the
* callback to decide whether that is acceptable.
*/
if (skb->len > ev->max_len)
bt_dev_warn_ratelimited(hdev,
"unexpected event 0x%2.2x length: %u > %u",
event, skb->len, ev->max_len);

data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
if (!data)
return;

if (ev->req)
ev->func_req(hdev, data, skb, opcode, status, req_complete,
req_complete_skb);
else
ev->func(hdev, data, skb);
}
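
/* Main HCI event entry point: keep a copy of the last received event,
 * match it against any pending request, dispatch it through
 * hci_ev_table and finally run the request completion callbacks.
 */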
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
hci_req_complete_t req_complete = NULL;
hci_req_complete_skb_t req_complete_skb = NULL;
struct sk_buff *orig_skb = NULL;
u8 status = 0, event, req_evt = 0;
u16 opcode = HCI_OP_NOP;

if (skb->len < sizeof(*hdr)) {
bt_dev_err(hdev, "Malformed HCI Event");
goto done;
}

hci_dev_lock(hdev);
kfree_skb(hdev->recv_event);
hdev->recv_event = skb_clone(skb, GFP_KERNEL);
hci_dev_unlock(hdev);

event = hdr->evt;
if (!event) {
bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
event);
goto done;
}

/* Only match event if command OGF is not for LE */
if (hdev->req_skb &&
hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
hci_skb_event(hdev->req_skb) == event) {
hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
status, &req_complete, &req_complete_skb);
req_evt = event;
}

/* If it looks like we might end up having to call
* req_complete_skb, store a pristine copy of the skb since the
* various handlers may modify the original one through
* skb_pull() calls, etc.
*/
if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
event == HCI_EV_CMD_COMPLETE)
orig_skb = skb_clone(skb, GFP_KERNEL);

skb_pull(skb, HCI_EVENT_HDR_SIZE);

/* Store wake reason if we're suspended */
hci_store_wake_reason(hdev, event, skb);

bt_dev_dbg(hdev, "event 0x%2.2x", event);

hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
&req_complete_skb);

if (req_complete) {
req_complete(hdev, status, opcode);
} else if (req_complete_skb) {
if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
kfree_skb(orig_skb);
orig_skb = NULL;
}
req_complete_skb(hdev, status, opcode, orig_skb);
}

done:
kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
}