GitHub Repository: torvalds/linux
Path: blob/master/include/net/bluetooth/hci_core.h
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <[email protected]>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_drv.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>

/* HCI priority */
#define HCI_PRIO_MAX 7

/* HCI maximum id value */
#define HCI_MAX_ID 10000

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t bdaddr;
	__u8 pscan_rep_mode;
	__u8 pscan_period_mode;
	__u8 pscan_mode;
	__u8 dev_class[3];
	__le16 clock_offset;
	__s8 rssi;
	__u8 ssp_mode;
};

struct inquiry_entry {
	struct list_head all; /* inq_cache.all */
	struct list_head list; /* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32 timestamp;
	struct inquiry_data data;
};

struct discovery_state {
	int type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head all; /* All devices found during inquiry */
	struct list_head unknown; /* Name state not known */
	struct list_head resolve; /* Name needs to be resolved */
	__u32 timestamp;
	bdaddr_t last_adv_addr;
	u8 last_adv_addr_type;
	s8 last_adv_rssi;
	u32 last_adv_flags;
	u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH];
	u8 last_adv_data_len;
	bool report_invalid_rssi;
	bool result_filtering;
	bool limited;
	s8 rssi;
	u16 uuid_count;
	u8 (*uuids)[16];
	unsigned long name_resolve_timeout;
	spinlock_t lock;
};

#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
102
103
enum suspend_tasks {
104
SUSPEND_PAUSE_DISCOVERY,
105
SUSPEND_UNPAUSE_DISCOVERY,
106
107
SUSPEND_PAUSE_ADVERTISING,
108
SUSPEND_UNPAUSE_ADVERTISING,
109
110
SUSPEND_SCAN_DISABLE,
111
SUSPEND_SCAN_ENABLE,
112
SUSPEND_DISCONNECTING,
113
114
SUSPEND_POWERING_DOWN,
115
116
SUSPEND_PREPARE_NOTIFIER,
117
118
SUSPEND_SET_ADV_FILTER,
119
__SUSPEND_NUM_TASKS
120
};
121
122
enum suspended_state {
123
BT_RUNNING = 0,
124
BT_SUSPEND_DISCONNECT,
125
BT_SUSPEND_CONFIGURE_WAKE,
126
};
127
128
struct hci_conn_hash {
129
struct list_head list;
130
unsigned int acl_num;
131
unsigned int sco_num;
132
unsigned int cis_num;
133
unsigned int bis_num;
134
unsigned int pa_num;
135
unsigned int le_num;
136
unsigned int le_num_peripheral;
137
};
138
139
struct bdaddr_list {
140
struct list_head list;
141
bdaddr_t bdaddr;
142
u8 bdaddr_type;
143
};
144
145
struct codec_list {
146
struct list_head list;
147
u8 id;
148
__u16 cid;
149
__u16 vid;
150
u8 transport;
151
u8 num_caps;
152
u32 len;
153
struct hci_codec_caps caps[];
154
};
155
156
struct bdaddr_list_with_irk {
157
struct list_head list;
158
bdaddr_t bdaddr;
159
u8 bdaddr_type;
160
u8 peer_irk[16];
161
u8 local_irk[16];
162
};
163
164
/* Bitmask of connection flags */
enum hci_conn_flags {
	HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0),
	HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1),
	HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2),
};
typedef u8 hci_conn_flags_t;

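/* Illustrative sketch (not part of the upstream header): the values above
 * are single bits, so one hci_conn_flags_t can carry several of them at
 * once, e.g. in bdaddr_list_with_flags or hci_conn_params. The helper name
 * below is hypothetical and only demonstrates the bitmask usage.
 */
static inline bool hci_conn_flag_example_test(hci_conn_flags_t flags,
					      enum hci_conn_flags flag)
{
	/* non-zero when the given flag bit is set in the mask */
	return (flags & flag) != 0;
}
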
struct bdaddr_list_with_flags {
173
struct list_head list;
174
bdaddr_t bdaddr;
175
u8 bdaddr_type;
176
hci_conn_flags_t flags;
177
};
178
179
struct bt_uuid {
180
struct list_head list;
181
u8 uuid[16];
182
u8 size;
183
u8 svc_hint;
184
};
185
186
struct blocked_key {
187
struct list_head list;
188
struct rcu_head rcu;
189
u8 type;
190
u8 val[16];
191
};
192
193
struct smp_csrk {
194
bdaddr_t bdaddr;
195
u8 bdaddr_type;
196
u8 type;
197
u8 val[16];
198
};
199
200
struct smp_ltk {
201
struct list_head list;
202
struct rcu_head rcu;
203
bdaddr_t bdaddr;
204
u8 bdaddr_type;
205
u8 authenticated;
206
u8 type;
207
u8 enc_size;
208
__le16 ediv;
209
__le64 rand;
210
u8 val[16];
211
};
212
213
struct smp_irk {
214
struct list_head list;
215
struct rcu_head rcu;
216
bdaddr_t rpa;
217
bdaddr_t bdaddr;
218
u8 addr_type;
219
u8 val[16];
220
};
221
222
struct link_key {
223
struct list_head list;
224
struct rcu_head rcu;
225
bdaddr_t bdaddr;
226
u8 type;
227
u8 val[HCI_LINK_KEY_SIZE];
228
u8 pin_len;
229
};
230
231
struct oob_data {
232
struct list_head list;
233
bdaddr_t bdaddr;
234
u8 bdaddr_type;
235
u8 present;
236
u8 hash192[16];
237
u8 rand192[16];
238
u8 hash256[16];
239
u8 rand256[16];
240
};
241
242
struct adv_info {
243
struct list_head list;
244
bool enabled;
245
bool pending;
246
bool periodic;
247
__u8 mesh;
248
__u8 instance;
249
__u8 handle;
250
__u8 sid;
251
__u32 flags;
252
__u16 timeout;
253
__u16 remaining_time;
254
__u16 duration;
255
__u16 adv_data_len;
256
__u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
257
bool adv_data_changed;
258
__u16 scan_rsp_len;
259
__u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
260
bool scan_rsp_changed;
261
__u16 per_adv_data_len;
262
__u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
263
__s8 tx_power;
264
__u32 min_interval;
265
__u32 max_interval;
266
bdaddr_t random_addr;
267
bool rpa_expired;
268
struct delayed_work rpa_expired_cb;
269
};
270
271
struct tx_queue {
272
struct sk_buff_head queue;
273
unsigned int extra;
274
unsigned int tracked;
275
};
276
277
#define HCI_MAX_ADV_INSTANCES 5
278
#define HCI_DEFAULT_ADV_DURATION 2
279
280
#define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F
281
282
#define DATA_CMP(_d1, _l1, _d2, _l2) \
	(_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2)

#define ADV_DATA_CMP(_adv, _data, _len) \
	DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len)

#define SCAN_RSP_CMP(_adv, _data, _len) \
	DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len)

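/* Illustrative sketch (not part of the upstream header): DATA_CMP() and its
 * wrappers compare the lengths first and fall back to memcmp(), so a zero
 * result means "identical data". The hypothetical helper below shows how a
 * caller might check whether new advertising data differs from what an
 * adv_info instance already holds.
 */
static inline bool hci_adv_data_example_changed(struct adv_info *adv,
						const u8 *data, u16 len)
{
	/* non-zero when length or contents differ */
	return ADV_DATA_CMP(adv, data, len) != 0;
}
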
struct monitored_device {
292
struct list_head list;
293
294
bdaddr_t bdaddr;
295
__u8 addr_type;
296
__u16 handle;
297
bool notified;
298
};
299
300
struct adv_pattern {
301
struct list_head list;
302
__u8 ad_type;
303
__u8 offset;
304
__u8 length;
305
__u8 value[HCI_MAX_EXT_AD_LENGTH];
306
};
307
308
struct adv_rssi_thresholds {
309
__s8 low_threshold;
310
__s8 high_threshold;
311
__u16 low_threshold_timeout;
312
__u16 high_threshold_timeout;
313
__u8 sampling_period;
314
};
315
316
struct adv_monitor {
317
struct list_head patterns;
318
struct adv_rssi_thresholds rssi;
319
__u16 handle;
320
321
enum {
322
ADV_MONITOR_STATE_NOT_REGISTERED,
323
ADV_MONITOR_STATE_REGISTERED,
324
ADV_MONITOR_STATE_OFFLOADED
325
} state;
326
};
327
328
#define HCI_MIN_ADV_MONITOR_HANDLE 1
329
#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
330
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
331
#define HCI_ADV_MONITOR_EXT_NONE 1
332
#define HCI_ADV_MONITOR_EXT_MSFT 2
333
334
#define HCI_MAX_SHORT_NAME_LENGTH 10
335
336
#define HCI_CONN_HANDLE_MAX 0x0eff
337
#define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX)
338
339
/* Min encryption key size to match with SMP */
340
#define HCI_MIN_ENC_KEY_SIZE 7
341
342
/* Default LE RPA expiry time, 15 minutes */
343
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
344
345
/* Default min/max age of connection information (1s/3s) */
346
#define DEFAULT_CONN_INFO_MIN_AGE 1000
347
#define DEFAULT_CONN_INFO_MAX_AGE 3000
348
/* Default authenticated payload timeout 30s */
349
#define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8
350
351
#define HCI_MAX_PAGES 3
352
353
struct hci_dev {
354
struct list_head list;
355
struct srcu_struct srcu;
356
struct mutex lock;
357
358
struct ida unset_handle_ida;
359
360
const char *name;
361
unsigned long flags;
362
__u16 id;
363
__u8 bus;
364
bdaddr_t bdaddr;
365
bdaddr_t setup_addr;
366
bdaddr_t public_addr;
367
bdaddr_t random_addr;
368
bdaddr_t static_addr;
369
__u8 adv_addr_type;
370
__u8 dev_name[HCI_MAX_NAME_LENGTH];
371
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
372
__u8 eir[HCI_MAX_EIR_LENGTH];
373
__u16 appearance;
374
__u8 dev_class[3];
375
__u8 major_class;
376
__u8 minor_class;
377
__u8 max_page;
378
__u8 features[HCI_MAX_PAGES][8];
379
__u8 le_features[8];
380
__u8 le_accept_list_size;
381
__u8 le_resolv_list_size;
382
__u8 le_num_of_adv_sets;
383
__u8 le_states[8];
384
__u8 mesh_ad_types[16];
385
__u8 mesh_send_ref;
386
__u8 commands[64];
387
__u8 hci_ver;
388
__u16 hci_rev;
389
__u8 lmp_ver;
390
__u16 manufacturer;
391
__u16 lmp_subver;
392
__u16 voice_setting;
393
__u8 num_iac;
394
__u16 stored_max_keys;
395
__u16 stored_num_keys;
396
__u8 io_capability;
397
__s8 inq_tx_power;
398
__u8 err_data_reporting;
399
__u16 page_scan_interval;
400
__u16 page_scan_window;
401
__u8 page_scan_type;
402
__u8 le_adv_channel_map;
403
__u16 le_adv_min_interval;
404
__u16 le_adv_max_interval;
405
__u8 le_scan_type;
406
__u16 le_scan_interval;
407
__u16 le_scan_window;
408
__u16 le_scan_int_suspend;
409
__u16 le_scan_window_suspend;
410
__u16 le_scan_int_discovery;
411
__u16 le_scan_window_discovery;
412
__u16 le_scan_int_adv_monitor;
413
__u16 le_scan_window_adv_monitor;
414
__u16 le_scan_int_connect;
415
__u16 le_scan_window_connect;
416
__u16 le_conn_min_interval;
417
__u16 le_conn_max_interval;
418
__u16 le_conn_latency;
419
__u16 le_supv_timeout;
420
__u16 le_def_tx_len;
421
__u16 le_def_tx_time;
422
__u16 le_max_tx_len;
423
__u16 le_max_tx_time;
424
__u16 le_max_rx_len;
425
__u16 le_max_rx_time;
426
__u8 le_max_key_size;
427
__u8 le_min_key_size;
428
__u16 discov_interleaved_timeout;
429
__u16 conn_info_min_age;
430
__u16 conn_info_max_age;
431
__u16 auth_payload_timeout;
432
__u8 min_enc_key_size;
433
__u8 max_enc_key_size;
434
__u8 pairing_opts;
435
__u8 ssp_debug_mode;
436
__u8 hw_error_code;
437
__u32 clock;
438
__u16 advmon_allowlist_duration;
439
__u16 advmon_no_filter_duration;
440
__u8 enable_advmon_interleave_scan;
441
442
__u16 devid_source;
443
__u16 devid_vendor;
444
__u16 devid_product;
445
__u16 devid_version;
446
447
__u8 def_page_scan_type;
448
__u16 def_page_scan_int;
449
__u16 def_page_scan_window;
450
__u8 def_inq_scan_type;
451
__u16 def_inq_scan_int;
452
__u16 def_inq_scan_window;
453
__u16 def_br_lsto;
454
__u16 def_page_timeout;
455
__u16 def_multi_adv_rotation_duration;
456
__u16 def_le_autoconnect_timeout;
457
__s8 min_le_tx_power;
458
__s8 max_le_tx_power;
459
460
__u16 pkt_type;
461
__u16 esco_type;
462
__u16 link_policy;
463
__u16 link_mode;
464
465
__u32 idle_timeout;
466
__u16 sniff_min_interval;
467
__u16 sniff_max_interval;
468
469
unsigned int auto_accept_delay;
470
471
DECLARE_BITMAP(quirk_flags, __HCI_NUM_QUIRKS);
472
473
atomic_t cmd_cnt;
474
unsigned int acl_cnt;
475
unsigned int sco_cnt;
476
unsigned int le_cnt;
477
unsigned int iso_cnt;
478
479
unsigned int acl_mtu;
480
unsigned int sco_mtu;
481
unsigned int le_mtu;
482
unsigned int iso_mtu;
483
unsigned int acl_pkts;
484
unsigned int sco_pkts;
485
unsigned int le_pkts;
486
unsigned int iso_pkts;
487
488
unsigned long acl_last_tx;
489
unsigned long le_last_tx;
490
unsigned long iso_last_tx;
491
492
__u8 le_tx_def_phys;
493
__u8 le_rx_def_phys;
494
495
struct workqueue_struct *workqueue;
496
struct workqueue_struct *req_workqueue;
497
498
struct work_struct power_on;
499
struct delayed_work power_off;
500
struct work_struct error_reset;
501
struct work_struct cmd_sync_work;
502
struct list_head cmd_sync_work_list;
503
struct mutex cmd_sync_work_lock;
504
struct mutex unregister_lock;
505
struct work_struct cmd_sync_cancel_work;
506
struct work_struct reenable_adv_work;
507
508
__u16 discov_timeout;
509
struct delayed_work discov_off;
510
511
struct delayed_work service_cache;
512
513
struct delayed_work cmd_timer;
514
struct delayed_work ncmd_timer;
515
516
struct work_struct rx_work;
517
struct work_struct cmd_work;
518
struct work_struct tx_work;
519
520
struct delayed_work le_scan_disable;
521
522
struct sk_buff_head rx_q;
523
struct sk_buff_head raw_q;
524
struct sk_buff_head cmd_q;
525
526
struct sk_buff *sent_cmd;
527
struct sk_buff *recv_event;
528
529
struct mutex req_lock;
530
wait_queue_head_t req_wait_q;
531
__u32 req_status;
532
__u32 req_result;
533
struct sk_buff *req_skb;
534
struct sk_buff *req_rsp;
535
536
void *smp_data;
537
void *smp_bredr_data;
538
539
struct discovery_state discovery;
540
541
bool discovery_paused;
542
int advertising_old_state;
543
bool advertising_paused;
544
545
struct notifier_block suspend_notifier;
546
enum suspended_state suspend_state_next;
547
enum suspended_state suspend_state;
548
bool scanning_paused;
549
bool suspended;
550
u8 wake_reason;
551
bdaddr_t wake_addr;
552
u8 wake_addr_type;
553
554
struct hci_conn_hash conn_hash;
555
556
struct list_head mesh_pending;
557
struct mutex mgmt_pending_lock;
558
struct list_head mgmt_pending;
559
struct list_head reject_list;
560
struct list_head accept_list;
561
struct list_head uuids;
562
struct list_head link_keys;
563
struct list_head long_term_keys;
564
struct list_head identity_resolving_keys;
565
struct list_head remote_oob_data;
566
struct list_head le_accept_list;
567
struct list_head le_resolv_list;
568
struct list_head le_conn_params;
569
struct list_head pend_le_conns;
570
struct list_head pend_le_reports;
571
struct list_head blocked_keys;
572
struct list_head local_codecs;
573
574
struct hci_dev_stats stat;
575
576
atomic_t promisc;
577
578
const char *hw_info;
579
const char *fw_info;
580
struct dentry *debugfs;
581
582
struct hci_devcoredump dump;
583
584
struct device dev;
585
586
struct rfkill *rfkill;
587
588
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
589
hci_conn_flags_t conn_flags;
590
591
__s8 adv_tx_power;
592
__u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
593
__u8 adv_data_len;
594
__u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
595
__u8 scan_rsp_data_len;
596
__u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
597
__u8 per_adv_data_len;
598
599
struct list_head adv_instances;
600
unsigned int adv_instance_cnt;
601
__u8 cur_adv_instance;
602
__u16 adv_instance_timeout;
603
struct delayed_work adv_instance_expire;
604
605
struct idr adv_monitors_idr;
606
unsigned int adv_monitors_cnt;
607
608
__u8 irk[16];
609
__u32 rpa_timeout;
610
struct delayed_work rpa_expired;
611
bdaddr_t rpa;
612
613
struct delayed_work mesh_send_done;
614
615
enum {
616
INTERLEAVE_SCAN_NONE,
617
INTERLEAVE_SCAN_NO_FILTER,
618
INTERLEAVE_SCAN_ALLOWLIST
619
} interleave_scan_state;
620
621
struct delayed_work interleave_scan;
622
623
struct list_head monitored_devices;
624
bool advmon_pend_notify;
625
626
struct hci_drv *hci_drv;
627
628
#if IS_ENABLED(CONFIG_BT_LEDS)
629
struct led_trigger *power_led;
630
#endif
631
632
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
633
__u16 msft_opcode;
634
void *msft_data;
635
bool msft_curve_validity;
636
#endif
637
638
#if IS_ENABLED(CONFIG_BT_AOSPEXT)
639
bool aosp_capable;
640
bool aosp_quality_report;
641
#endif
642
643
int (*open)(struct hci_dev *hdev);
644
int (*close)(struct hci_dev *hdev);
645
int (*flush)(struct hci_dev *hdev);
646
int (*setup)(struct hci_dev *hdev);
647
int (*shutdown)(struct hci_dev *hdev);
648
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
649
void (*notify)(struct hci_dev *hdev, unsigned int evt);
650
void (*hw_error)(struct hci_dev *hdev, u8 code);
651
int (*post_init)(struct hci_dev *hdev);
652
int (*set_diag)(struct hci_dev *hdev, bool enable);
653
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
654
void (*reset)(struct hci_dev *hdev);
655
bool (*wakeup)(struct hci_dev *hdev);
656
int (*set_quality_report)(struct hci_dev *hdev, bool enable);
657
int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
658
int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
659
struct bt_codec *codec, __u8 *vnd_len,
660
__u8 **vnd_data);
661
u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
662
};
663
664
#define hci_set_quirk(hdev, nr) set_bit((nr), (hdev)->quirk_flags)
665
#define hci_clear_quirk(hdev, nr) clear_bit((nr), (hdev)->quirk_flags)
666
#define hci_test_quirk(hdev, nr) test_bit((nr), (hdev)->quirk_flags)
667
668
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
669
670
enum conn_reasons {
671
CONN_REASON_PAIR_DEVICE,
672
CONN_REASON_L2CAP_CHAN,
673
CONN_REASON_SCO_CONNECT,
674
CONN_REASON_ISO_CONNECT,
675
};
676
677
struct hci_conn {
678
struct list_head list;
679
680
atomic_t refcnt;
681
682
bdaddr_t dst;
683
__u8 dst_type;
684
bdaddr_t src;
685
__u8 src_type;
686
bdaddr_t init_addr;
687
__u8 init_addr_type;
688
bdaddr_t resp_addr;
689
__u8 resp_addr_type;
690
__u8 adv_instance;
691
__u16 handle;
692
__u16 sync_handle;
693
__u8 sid;
694
__u16 state;
695
__u16 mtu;
696
__u8 mode;
697
__u8 type;
698
__u8 role;
699
bool out;
700
__u8 attempt;
701
__u8 dev_class[3];
702
__u8 features[HCI_MAX_PAGES][8];
703
__u16 pkt_type;
704
__u16 link_policy;
705
__u8 key_type;
706
__u8 auth_type;
707
__u8 sec_level;
708
__u8 pending_sec_level;
709
__u8 pin_length;
710
__u8 enc_key_size;
711
__u8 io_capability;
712
__u32 passkey_notify;
713
__u8 passkey_entered;
714
__u16 disc_timeout;
715
__u16 conn_timeout;
716
__u16 setting;
717
__u16 auth_payload_timeout;
718
__u16 le_conn_min_interval;
719
__u16 le_conn_max_interval;
720
__u16 le_conn_interval;
721
__u16 le_conn_latency;
722
__u16 le_supv_timeout;
723
__u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
724
__u8 le_adv_data_len;
725
__u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
726
__u16 le_per_adv_data_len;
727
__u16 le_per_adv_data_offset;
728
__u8 le_adv_phy;
729
__u8 le_adv_sec_phy;
730
__u8 le_tx_phy;
731
__u8 le_rx_phy;
732
__s8 rssi;
733
__s8 tx_power;
734
__s8 max_tx_power;
735
struct bt_iso_qos iso_qos;
736
__u8 num_bis;
737
__u8 bis[HCI_MAX_ISO_BIS];
738
739
unsigned long flags;
740
741
enum conn_reasons conn_reason;
742
__u8 abort_reason;
743
744
__u32 clock;
745
__u16 clock_accuracy;
746
747
unsigned long conn_info_timestamp;
748
749
__u8 remote_cap;
750
__u8 remote_auth;
751
__u8 remote_id;
752
753
unsigned int sent;
754
755
struct sk_buff_head data_q;
756
struct list_head chan_list;
757
758
struct tx_queue tx_q;
759
760
struct delayed_work disc_work;
761
struct delayed_work auto_accept_work;
762
struct delayed_work idle_work;
763
struct delayed_work le_conn_timeout;
764
765
struct device dev;
766
struct dentry *debugfs;
767
768
struct hci_dev *hdev;
769
void *l2cap_data;
770
void *sco_data;
771
void *iso_data;
772
773
struct list_head link_list;
774
struct hci_conn *parent;
775
struct hci_link *link;
776
777
struct bt_codec codec;
778
779
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
780
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
781
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
782
783
void (*cleanup)(struct hci_conn *conn);
784
};
785
786
struct hci_link {
787
struct list_head list;
788
struct hci_conn *conn;
789
};
790
791
struct hci_chan {
792
struct list_head list;
793
__u16 handle;
794
struct hci_conn *conn;
795
struct sk_buff_head data_q;
796
unsigned int sent;
797
__u8 state;
798
};
799
800
struct hci_conn_params {
801
struct list_head list;
802
struct list_head action;
803
804
bdaddr_t addr;
805
u8 addr_type;
806
807
u16 conn_min_interval;
808
u16 conn_max_interval;
809
u16 conn_latency;
810
u16 supervision_timeout;
811
812
enum {
813
HCI_AUTO_CONN_DISABLED,
814
HCI_AUTO_CONN_REPORT,
815
HCI_AUTO_CONN_DIRECT,
816
HCI_AUTO_CONN_ALWAYS,
817
HCI_AUTO_CONN_LINK_LOSS,
818
HCI_AUTO_CONN_EXPLICIT,
819
} auto_connect;
820
821
struct hci_conn *conn;
822
bool explicit_connect;
823
/* Accessed without hdev->lock: */
824
hci_conn_flags_t flags;
825
u8 privacy_mode;
826
};
827
828
extern struct list_head hci_dev_list;
829
extern struct list_head hci_cb_list;
830
extern rwlock_t hci_dev_list_lock;
831
extern struct mutex hci_cb_list_lock;
832
833
#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)

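/* Illustrative sketch (not part of the upstream header): hdev->dev_flags is
 * a plain bitmap, so the wrappers above map directly onto the kernel bitops.
 * The hypothetical helper below only shows the typical test shape with one
 * of the HCI_* dev flags from hci.h; the le_enabled() macro further down
 * combines the same test with lmp_le_capable().
 */
static inline bool hci_dev_example_le_enabled(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_LE_ENABLED);
}
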
#define hci_dev_clear_volatile_flags(hdev) \
842
do { \
843
hci_dev_clear_flag((hdev), HCI_LE_SCAN); \
844
hci_dev_clear_flag((hdev), HCI_LE_ADV); \
845
hci_dev_clear_flag((hdev), HCI_LL_RPA_RESOLUTION); \
846
hci_dev_clear_flag((hdev), HCI_PERIODIC_INQ); \
847
hci_dev_clear_flag((hdev), HCI_QUALITY_REPORT); \
848
} while (0)
849
850
#define hci_dev_le_state_simultaneous(hdev) \
851
(!hci_test_quirk((hdev), HCI_QUIRK_BROKEN_LE_STATES) && \
852
((hdev)->le_states[4] & 0x08) && /* Central */ \
853
((hdev)->le_states[4] & 0x40) && /* Peripheral */ \
854
((hdev)->le_states[3] & 0x10)) /* Simultaneous */
855
856
/* ----- HCI interface to upper protocols ----- */
857
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
858
int l2cap_disconn_ind(struct hci_conn *hcon);
859
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
860
861
#if IS_ENABLED(CONFIG_BT_BREDR)
862
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
863
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
864
#else
865
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
866
__u8 *flags)
867
{
868
return 0;
869
}
870
871
static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
872
{
873
}
874
#endif
875
876
#if IS_ENABLED(CONFIG_BT_LE)
877
int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
878
void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
879
#else
880
static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
881
__u8 *flags)
882
{
883
return 0;
884
}
885
static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
886
u16 flags)
887
{
888
}
889
#endif
890
891
/* ----- Inquiry cache ----- */
892
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
893
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
894
895
static inline void discovery_init(struct hci_dev *hdev)
896
{
897
spin_lock_init(&hdev->discovery.lock);
898
hdev->discovery.state = DISCOVERY_STOPPED;
899
INIT_LIST_HEAD(&hdev->discovery.all);
900
INIT_LIST_HEAD(&hdev->discovery.unknown);
901
INIT_LIST_HEAD(&hdev->discovery.resolve);
902
hdev->discovery.report_invalid_rssi = true;
903
hdev->discovery.rssi = HCI_RSSI_INVALID;
904
}
905
906
static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
907
{
908
hdev->discovery.result_filtering = false;
909
hdev->discovery.report_invalid_rssi = true;
910
hdev->discovery.rssi = HCI_RSSI_INVALID;
911
hdev->discovery.uuid_count = 0;
912
913
spin_lock(&hdev->discovery.lock);
914
kfree(hdev->discovery.uuids);
915
hdev->discovery.uuids = NULL;
916
spin_unlock(&hdev->discovery.lock);
917
}
918
919
bool hci_discovery_active(struct hci_dev *hdev);
920
921
void hci_discovery_set_state(struct hci_dev *hdev, int state);
922
923
static inline int inquiry_cache_empty(struct hci_dev *hdev)
924
{
925
return list_empty(&hdev->discovery.all);
926
}
927
928
static inline long inquiry_cache_age(struct hci_dev *hdev)
929
{
930
struct discovery_state *c = &hdev->discovery;
931
return jiffies - c->timestamp;
932
}
933
934
static inline long inquiry_entry_age(struct inquiry_entry *e)
935
{
936
return jiffies - e->timestamp;
937
}
938
939
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
940
bdaddr_t *bdaddr);
941
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
942
bdaddr_t *bdaddr);
943
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
944
bdaddr_t *bdaddr,
945
int state);
946
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
947
struct inquiry_entry *ie);
948
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
949
bool name_known);
950
void hci_inquiry_cache_flush(struct hci_dev *hdev);
951
952
/* ----- HCI Connections ----- */
953
enum {
954
HCI_CONN_AUTH_PEND,
955
HCI_CONN_ENCRYPT_PEND,
956
HCI_CONN_RSWITCH_PEND,
957
HCI_CONN_MODE_CHANGE_PEND,
958
HCI_CONN_SCO_SETUP_PEND,
959
HCI_CONN_MGMT_CONNECTED,
960
HCI_CONN_SSP_ENABLED,
961
HCI_CONN_SC_ENABLED,
962
HCI_CONN_AES_CCM,
963
HCI_CONN_POWER_SAVE,
964
HCI_CONN_FLUSH_KEY,
965
HCI_CONN_ENCRYPT,
966
HCI_CONN_AUTH,
967
HCI_CONN_SECURE,
968
HCI_CONN_FIPS,
969
HCI_CONN_STK_ENCRYPT,
970
HCI_CONN_AUTH_INITIATOR,
971
HCI_CONN_DROP,
972
HCI_CONN_CANCEL,
973
HCI_CONN_PARAM_REMOVAL_PEND,
974
HCI_CONN_NEW_LINK_KEY,
975
HCI_CONN_SCANNING,
976
HCI_CONN_AUTH_FAILURE,
977
HCI_CONN_PER_ADV,
978
HCI_CONN_BIG_CREATED,
979
HCI_CONN_CREATE_CIS,
980
HCI_CONN_CREATE_BIG_SYNC,
981
HCI_CONN_BIG_SYNC,
982
HCI_CONN_BIG_SYNC_FAILED,
983
HCI_CONN_CREATE_PA_SYNC,
984
HCI_CONN_PA_SYNC,
985
HCI_CONN_PA_SYNC_FAILED,
986
};
987
988
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
989
{
990
struct hci_dev *hdev = conn->hdev;
991
return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
992
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
993
}
994
995
static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
996
{
997
struct hci_dev *hdev = conn->hdev;
998
return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
999
test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
1000
}
1001
1002
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
1003
{
1004
struct hci_conn_hash *h = &hdev->conn_hash;
1005
list_add_tail_rcu(&c->list, &h->list);
1006
switch (c->type) {
1007
case ACL_LINK:
1008
h->acl_num++;
1009
break;
1010
case LE_LINK:
1011
h->le_num++;
1012
if (c->role == HCI_ROLE_SLAVE)
1013
h->le_num_peripheral++;
1014
break;
1015
case SCO_LINK:
1016
case ESCO_LINK:
1017
h->sco_num++;
1018
break;
1019
case CIS_LINK:
1020
h->cis_num++;
1021
break;
1022
case BIS_LINK:
1023
h->bis_num++;
1024
break;
1025
case PA_LINK:
1026
h->pa_num++;
1027
break;
1028
}
1029
}
1030
1031
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
1032
{
1033
struct hci_conn_hash *h = &hdev->conn_hash;
1034
1035
list_del_rcu(&c->list);
1036
synchronize_rcu();
1037
1038
switch (c->type) {
1039
case ACL_LINK:
1040
h->acl_num--;
1041
break;
1042
case LE_LINK:
1043
h->le_num--;
1044
if (c->role == HCI_ROLE_SLAVE)
1045
h->le_num_peripheral--;
1046
break;
1047
case SCO_LINK:
1048
case ESCO_LINK:
1049
h->sco_num--;
1050
break;
1051
case CIS_LINK:
1052
h->cis_num--;
1053
break;
1054
case BIS_LINK:
1055
h->bis_num--;
1056
break;
1057
case PA_LINK:
1058
h->pa_num--;
1059
break;
1060
}
1061
}
1062
1063
static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
1064
{
1065
struct hci_conn_hash *h = &hdev->conn_hash;
1066
switch (type) {
1067
case ACL_LINK:
1068
return h->acl_num;
1069
case LE_LINK:
1070
return h->le_num;
1071
case SCO_LINK:
1072
case ESCO_LINK:
1073
return h->sco_num;
1074
case CIS_LINK:
1075
return h->cis_num;
1076
case BIS_LINK:
1077
return h->bis_num;
1078
case PA_LINK:
1079
return h->pa_num;
1080
default:
1081
return 0;
1082
}
1083
}
1084
1085
static inline unsigned int hci_conn_count(struct hci_dev *hdev)
1086
{
1087
struct hci_conn_hash *c = &hdev->conn_hash;
1088
1089
return c->acl_num + c->sco_num + c->le_num + c->cis_num + c->bis_num +
1090
c->pa_num;
1091
}
1092
1093
static inline unsigned int hci_iso_count(struct hci_dev *hdev)
1094
{
1095
struct hci_conn_hash *c = &hdev->conn_hash;
1096
1097
return c->cis_num + c->bis_num;
1098
}
1099
1100
static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
1101
{
1102
struct hci_conn_hash *h = &hdev->conn_hash;
1103
struct hci_conn *c;
1104
1105
rcu_read_lock();
1106
1107
list_for_each_entry_rcu(c, &h->list, list) {
1108
if (c == conn) {
1109
rcu_read_unlock();
1110
return true;
1111
}
1112
}
1113
rcu_read_unlock();
1114
1115
return false;
1116
}
1117
1118
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
1119
{
1120
struct hci_conn_hash *h = &hdev->conn_hash;
1121
struct hci_conn *c;
1122
__u8 type = INVALID_LINK;
1123
1124
rcu_read_lock();
1125
1126
list_for_each_entry_rcu(c, &h->list, list) {
1127
if (c->handle == handle) {
1128
type = c->type;
1129
break;
1130
}
1131
}
1132
1133
rcu_read_unlock();
1134
1135
return type;
1136
}
1137
1138
static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
1139
bdaddr_t *ba, __u8 bis)
1140
{
1141
struct hci_conn_hash *h = &hdev->conn_hash;
1142
struct hci_conn *c;
1143
1144
rcu_read_lock();
1145
1146
list_for_each_entry_rcu(c, &h->list, list) {
1147
if (bacmp(&c->dst, ba) || c->type != BIS_LINK)
1148
continue;
1149
1150
if (c->iso_qos.bcast.bis == bis) {
1151
rcu_read_unlock();
1152
return c;
1153
}
1154
}
1155
rcu_read_unlock();
1156
1157
return NULL;
1158
}
1159
1160
static inline struct hci_conn *
1161
hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
1162
{
1163
struct hci_conn_hash *h = &hdev->conn_hash;
1164
struct hci_conn *c;
1165
1166
rcu_read_lock();
1167
1168
list_for_each_entry_rcu(c, &h->list, list) {
1169
if (c->type != PA_LINK)
1170
continue;
1171
1172
if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
1173
continue;
1174
1175
rcu_read_unlock();
1176
return c;
1177
}
1178
1179
rcu_read_unlock();
1180
1181
return NULL;
1182
}
1183
1184
static inline struct hci_conn *
1185
hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
1186
bdaddr_t *ba,
1187
__u8 big, __u8 bis)
1188
{
1189
struct hci_conn_hash *h = &hdev->conn_hash;
1190
struct hci_conn *c;
1191
1192
rcu_read_lock();
1193
1194
list_for_each_entry_rcu(c, &h->list, list) {
1195
if (bacmp(&c->dst, ba) || c->type != BIS_LINK ||
1196
!test_bit(HCI_CONN_PER_ADV, &c->flags))
1197
continue;
1198
1199
if (c->iso_qos.bcast.big == big &&
1200
c->iso_qos.bcast.bis == bis) {
1201
rcu_read_unlock();
1202
return c;
1203
}
1204
}
1205
rcu_read_unlock();
1206
1207
return NULL;
1208
}
1209
1210
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
1211
__u16 handle)
1212
{
1213
struct hci_conn_hash *h = &hdev->conn_hash;
1214
struct hci_conn *c;
1215
1216
rcu_read_lock();
1217
1218
list_for_each_entry_rcu(c, &h->list, list) {
1219
if (c->handle == handle) {
1220
rcu_read_unlock();
1221
return c;
1222
}
1223
}
1224
rcu_read_unlock();
1225
1226
return NULL;
1227
}
1228
1229
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
1230
__u8 type, bdaddr_t *ba)
1231
{
1232
struct hci_conn_hash *h = &hdev->conn_hash;
1233
struct hci_conn *c;
1234
1235
rcu_read_lock();
1236
1237
list_for_each_entry_rcu(c, &h->list, list) {
1238
if (c->type == type && !bacmp(&c->dst, ba)) {
1239
rcu_read_unlock();
1240
return c;
1241
}
1242
}
1243
1244
rcu_read_unlock();
1245
1246
return NULL;
1247
}
1248
1249
static inline struct hci_conn *hci_conn_hash_lookup_role(struct hci_dev *hdev,
1250
__u8 type, __u8 role,
1251
bdaddr_t *ba)
1252
{
1253
struct hci_conn_hash *h = &hdev->conn_hash;
1254
struct hci_conn *c;
1255
1256
rcu_read_lock();
1257
1258
list_for_each_entry_rcu(c, &h->list, list) {
1259
if (c->type == type && c->role == role && !bacmp(&c->dst, ba)) {
1260
rcu_read_unlock();
1261
return c;
1262
}
1263
}
1264
1265
rcu_read_unlock();
1266
1267
return NULL;
1268
}
1269
1270
static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
1271
bdaddr_t *ba,
1272
__u8 ba_type)
1273
{
1274
struct hci_conn_hash *h = &hdev->conn_hash;
1275
struct hci_conn *c;
1276
1277
rcu_read_lock();
1278
1279
list_for_each_entry_rcu(c, &h->list, list) {
1280
if (c->type != LE_LINK)
1281
continue;
1282
1283
if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
1284
rcu_read_unlock();
1285
return c;
1286
}
1287
}
1288
1289
rcu_read_unlock();
1290
1291
return NULL;
1292
}
1293
1294
static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
1295
bdaddr_t *ba,
1296
__u8 ba_type,
1297
__u8 cig,
1298
__u8 id)
1299
{
1300
struct hci_conn_hash *h = &hdev->conn_hash;
1301
struct hci_conn *c;
1302
1303
rcu_read_lock();
1304
1305
list_for_each_entry_rcu(c, &h->list, list) {
1306
if (c->type != CIS_LINK)
1307
continue;
1308
1309
/* Match CIG ID if set */
1310
if (cig != c->iso_qos.ucast.cig)
1311
continue;
1312
1313
/* Match CIS ID if set */
1314
if (id != c->iso_qos.ucast.cis)
1315
continue;
1316
1317
/* Match destination address if set */
1318
if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) {
1319
rcu_read_unlock();
1320
return c;
1321
}
1322
}
1323
1324
rcu_read_unlock();
1325
1326
return NULL;
1327
}
1328
1329
static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
1330
__u8 handle)
1331
{
1332
struct hci_conn_hash *h = &hdev->conn_hash;
1333
struct hci_conn *c;
1334
1335
rcu_read_lock();
1336
1337
list_for_each_entry_rcu(c, &h->list, list) {
1338
if (c->type != CIS_LINK)
1339
continue;
1340
1341
if (handle == c->iso_qos.ucast.cig) {
1342
rcu_read_unlock();
1343
return c;
1344
}
1345
}
1346
1347
rcu_read_unlock();
1348
1349
return NULL;
1350
}
1351
1352
static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
1353
__u8 handle)
1354
{
1355
struct hci_conn_hash *h = &hdev->conn_hash;
1356
struct hci_conn *c;
1357
1358
rcu_read_lock();
1359
1360
list_for_each_entry_rcu(c, &h->list, list) {
1361
if (c->type != BIS_LINK)
1362
continue;
1363
1364
if (handle == c->iso_qos.bcast.big) {
1365
rcu_read_unlock();
1366
return c;
1367
}
1368
}
1369
1370
rcu_read_unlock();
1371
1372
return NULL;
1373
}
1374
1375
static inline struct hci_conn *
1376
hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
1377
__u8 handle, __u8 num_bis)
1378
{
1379
struct hci_conn_hash *h = &hdev->conn_hash;
1380
struct hci_conn *c;
1381
1382
rcu_read_lock();
1383
1384
list_for_each_entry_rcu(c, &h->list, list) {
1385
if (c->type != PA_LINK)
1386
continue;
1387
1388
if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
1389
rcu_read_unlock();
1390
return c;
1391
}
1392
}
1393
1394
rcu_read_unlock();
1395
1396
return NULL;
1397
}
1398
1399
static inline struct hci_conn *
1400
hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state,
1401
__u8 role)
1402
{
1403
struct hci_conn_hash *h = &hdev->conn_hash;
1404
struct hci_conn *c;
1405
1406
rcu_read_lock();
1407
1408
list_for_each_entry_rcu(c, &h->list, list) {
1409
if (c->type != BIS_LINK || c->state != state || c->role != role)
1410
continue;
1411
1412
if (handle == c->iso_qos.bcast.big) {
1413
rcu_read_unlock();
1414
return c;
1415
}
1416
}
1417
1418
rcu_read_unlock();
1419
1420
return NULL;
1421
}
1422
1423
static inline struct hci_conn *
1424
hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
1425
{
1426
struct hci_conn_hash *h = &hdev->conn_hash;
1427
struct hci_conn *c;
1428
1429
rcu_read_lock();
1430
1431
list_for_each_entry_rcu(c, &h->list, list) {
1432
if (c->type != BIS_LINK ||
1433
!test_bit(HCI_CONN_PA_SYNC, &c->flags))
1434
continue;
1435
1436
if (c->iso_qos.bcast.big == big) {
1437
rcu_read_unlock();
1438
return c;
1439
}
1440
}
1441
rcu_read_unlock();
1442
1443
return NULL;
1444
}
1445
1446
static inline struct hci_conn *
1447
hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
1448
{
1449
struct hci_conn_hash *h = &hdev->conn_hash;
1450
struct hci_conn *c;
1451
1452
rcu_read_lock();
1453
1454
list_for_each_entry_rcu(c, &h->list, list) {
1455
if (c->type != PA_LINK)
1456
continue;
1457
1458
/* Ignore the listen hcon, we are looking
1459
* for the child hcon that was created as
1460
* a result of the PA sync established event.
1461
*/
1462
if (c->state == BT_LISTEN)
1463
continue;
1464
1465
if (c->sync_handle == sync_handle) {
1466
rcu_read_unlock();
1467
return c;
1468
}
1469
}
1470
rcu_read_unlock();
1471
1472
return NULL;
1473
}
1474
1475
typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
1476
static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
1477
hci_conn_func_t func, __u8 type,
1478
__u16 state, void *data)
1479
{
1480
struct hci_conn_hash *h = &hdev->conn_hash;
1481
struct hci_conn *c;
1482
1483
if (!func)
1484
return;
1485
1486
rcu_read_lock();
1487
1488
list_for_each_entry_rcu(c, &h->list, list) {
1489
if (c->type == type && c->state == state)
1490
func(c, data);
1491
}
1492
1493
rcu_read_unlock();
1494
}
1495
1496
static inline void hci_conn_hash_list_flag(struct hci_dev *hdev,
1497
hci_conn_func_t func, __u8 type,
1498
__u8 flag, void *data)
1499
{
1500
struct hci_conn_hash *h = &hdev->conn_hash;
1501
struct hci_conn *c;
1502
1503
if (!func)
1504
return;
1505
1506
rcu_read_lock();
1507
1508
list_for_each_entry_rcu(c, &h->list, list) {
1509
if (c->type == type && test_bit(flag, &c->flags))
1510
func(c, data);
1511
}
1512
1513
rcu_read_unlock();
1514
}
1515
1516
static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
1517
{
1518
struct hci_conn_hash *h = &hdev->conn_hash;
1519
struct hci_conn *c;
1520
1521
rcu_read_lock();
1522
1523
list_for_each_entry_rcu(c, &h->list, list) {
1524
if (c->type == LE_LINK && c->state == BT_CONNECT &&
1525
!test_bit(HCI_CONN_SCANNING, &c->flags)) {
1526
rcu_read_unlock();
1527
return c;
1528
}
1529
}
1530
1531
rcu_read_unlock();
1532
1533
return NULL;
1534
}
1535
1536
/* Returns true if an le connection is in the scanning state */
1537
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1538
{
1539
struct hci_conn_hash *h = &hdev->conn_hash;
1540
struct hci_conn *c;
1541
1542
rcu_read_lock();
1543
1544
list_for_each_entry_rcu(c, &h->list, list) {
1545
if (c->type == LE_LINK && c->state == BT_CONNECT &&
1546
test_bit(HCI_CONN_SCANNING, &c->flags)) {
1547
rcu_read_unlock();
1548
return true;
1549
}
1550
}
1551
1552
rcu_read_unlock();
1553
1554
return false;
1555
}
1556
1557
int hci_disconnect(struct hci_conn *conn, __u8 reason);
1558
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
1559
void hci_sco_setup(struct hci_conn *conn, __u8 status);
1560
bool hci_iso_setup_path(struct hci_conn *conn);
1561
int hci_le_create_cis_pending(struct hci_dev *hdev);
1562
int hci_conn_check_create_cis(struct hci_conn *conn);
1563
1564
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1565
u8 role, u16 handle);
1566
struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1567
bdaddr_t *dst, u8 role);
1568
void hci_conn_del(struct hci_conn *conn);
1569
void hci_conn_hash_flush(struct hci_dev *hdev);
1570
1571
struct hci_chan *hci_chan_create(struct hci_conn *conn);
1572
void hci_chan_del(struct hci_chan *chan);
1573
void hci_chan_list_flush(struct hci_conn *conn);
1574
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
1575
1576
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1577
u8 dst_type, u8 sec_level,
1578
u16 conn_timeout,
1579
enum conn_reasons conn_reason);
1580
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1581
u8 dst_type, bool dst_resolved, u8 sec_level,
1582
u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
1583
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
1584
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1585
u8 sec_level, u8 auth_type,
1586
enum conn_reasons conn_reason, u16 timeout);
1587
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1588
__u16 setting, struct bt_codec *codec,
1589
u16 timeout);
1590
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1591
__u8 dst_type, struct bt_iso_qos *qos,
1592
u16 timeout);
1593
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
1594
struct bt_iso_qos *qos,
1595
__u8 base_len, __u8 *base, u16 timeout);
1596
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
1597
__u8 dst_type, struct bt_iso_qos *qos,
1598
u16 timeout);
1599
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
1600
__u8 dst_type, __u8 sid,
1601
struct bt_iso_qos *qos,
1602
__u8 data_len, __u8 *data, u16 timeout);
1603
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
1604
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
1605
int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
1606
struct bt_iso_qos *qos, __u16 sync_handle,
1607
__u8 num_bis, __u8 bis[]);
1608
int hci_conn_check_link_mode(struct hci_conn *conn);
1609
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
1610
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1611
bool initiator);
1612
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
1613
1614
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
1615
1616
void hci_conn_failed(struct hci_conn *conn, u8 status);
1617
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
1618
1619
void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb);
1620
void hci_conn_tx_dequeue(struct hci_conn *conn);
1621
void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
1622
const struct sockcm_cookie *sockc);
1623
1624
static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk)
1625
{
1626
*sockc = (struct sockcm_cookie) {
1627
.tsflags = READ_ONCE(sk->sk_tsflags),
1628
};
1629
}
1630
1631
/*
 * hci_conn_get() and hci_conn_put() are used to control the life-time of an
 * "hci_conn" object. They do not guarantee that the hci_conn object is running,
 * working or anything else. They just guarantee that the object is available
 * and can be dereferenced. So you can use its locks, local variables and any
 * other constant data.
 * Before accessing runtime data, you _must_ lock the object and then check that
 * it is still running. As soon as you release the locks, the connection might
 * get dropped, though.
 *
 * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
 * how long the underlying connection is held. So every channel that runs on the
 * hci_conn object calls this to prevent the connection from disappearing. As
 * long as you hold a device, you must also guarantee that you have a valid
 * reference to the device via hci_conn_get() (or the initial reference from
 * hci_conn_add()).
 * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
 * break because nobody cares for that. But this means, we cannot use
 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
 */

static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
1653
{
1654
get_device(&conn->dev);
1655
return conn;
1656
}
1657
1658
static inline void hci_conn_put(struct hci_conn *conn)
1659
{
1660
put_device(&conn->dev);
1661
}
1662
1663
static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn)
1664
{
1665
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
1666
1667
atomic_inc(&conn->refcnt);
1668
cancel_delayed_work(&conn->disc_work);
1669
1670
return conn;
1671
}
1672
1673
static inline void hci_conn_drop(struct hci_conn *conn)
1674
{
1675
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
1676
1677
if (atomic_dec_and_test(&conn->refcnt)) {
1678
unsigned long timeo;
1679
1680
switch (conn->type) {
1681
case ACL_LINK:
1682
case LE_LINK:
1683
cancel_delayed_work(&conn->idle_work);
1684
if (conn->state == BT_CONNECTED) {
1685
timeo = conn->disc_timeout;
1686
if (!conn->out)
1687
timeo *= 2;
1688
} else {
1689
timeo = 0;
1690
}
1691
break;
1692
1693
default:
1694
timeo = 0;
1695
break;
1696
}
1697
1698
cancel_delayed_work(&conn->disc_work);
1699
queue_delayed_work(conn->hdev->workqueue,
1700
&conn->disc_work, timeo);
1701
}
1702
}
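
/* Illustrative sketch (not part of the upstream header): per the life-time
 * comment above, code that keeps a connection in use pairs hci_conn_hold()
 * with hci_conn_drop(), while code that only needs the object to stay
 * dereferenceable pairs hci_conn_get() with hci_conn_put(). The hypothetical
 * helper below shows the hold/drop shape only.
 */
static inline void hci_conn_example_use(struct hci_conn *conn)
{
	hci_conn_hold(conn);
	/* ... work with the connection while the reference is held ... */
	hci_conn_drop(conn);
}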
/* ----- HCI Devices ----- */
1705
static inline void hci_dev_put(struct hci_dev *d)
1706
{
1707
BT_DBG("%s orig refcnt %d", d->name,
1708
kref_read(&d->dev.kobj.kref));
1709
1710
put_device(&d->dev);
1711
}
1712
1713
static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
1714
{
1715
BT_DBG("%s orig refcnt %d", d->name,
1716
kref_read(&d->dev.kobj.kref));
1717
1718
get_device(&d->dev);
1719
return d;
1720
}
1721
1722
#define hci_dev_lock(d) mutex_lock(&d->lock)
#define hci_dev_unlock(d) mutex_unlock(&d->lock)

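/* Illustrative sketch (not part of the upstream header): hdev->lock is the
 * mutex guarding most hci_dev state, so reads and updates of that state are
 * typically wrapped as below. The helper name is hypothetical and only
 * demonstrates the locking pattern.
 */
static inline __u8 hci_dev_example_io_capability(struct hci_dev *hdev)
{
	__u8 io_cap;

	hci_dev_lock(hdev);
	io_cap = hdev->io_capability;
	hci_dev_unlock(hdev);

	return io_cap;
}
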
#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
1726
#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
1727
1728
static inline void *hci_get_drvdata(struct hci_dev *hdev)
1729
{
1730
return dev_get_drvdata(&hdev->dev);
1731
}
1732
1733
static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
1734
{
1735
dev_set_drvdata(&hdev->dev, data);
1736
}
1737
1738
static inline void *hci_get_priv(struct hci_dev *hdev)
1739
{
1740
return (char *)hdev + sizeof(*hdev);
1741
}
1742
1743
struct hci_dev *hci_dev_get(int index);
1744
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
1745
1746
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);
1747
1748
static inline struct hci_dev *hci_alloc_dev(void)
1749
{
1750
return hci_alloc_dev_priv(0);
1751
}
1752
1753
void hci_free_dev(struct hci_dev *hdev);
1754
int hci_register_dev(struct hci_dev *hdev);
1755
void hci_unregister_dev(struct hci_dev *hdev);
1756
void hci_release_dev(struct hci_dev *hdev);
1757
int hci_register_suspend_notifier(struct hci_dev *hdev);
1758
int hci_unregister_suspend_notifier(struct hci_dev *hdev);
1759
int hci_suspend_dev(struct hci_dev *hdev);
1760
int hci_resume_dev(struct hci_dev *hdev);
1761
int hci_reset_dev(struct hci_dev *hdev);
1762
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
1763
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
1764
__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
1765
__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
1766
1767
static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
1768
{
1769
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
1770
hdev->msft_opcode = opcode;
1771
#endif
1772
}
1773
1774
static inline void hci_set_aosp_capable(struct hci_dev *hdev)
1775
{
1776
#if IS_ENABLED(CONFIG_BT_AOSPEXT)
1777
hdev->aosp_capable = true;
1778
#endif
1779
}
1780
1781
static inline void hci_devcd_setup(struct hci_dev *hdev)
1782
{
1783
#ifdef CONFIG_DEV_COREDUMP
1784
INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx);
1785
INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout);
1786
skb_queue_head_init(&hdev->dump.dump_q);
1787
#endif
1788
}
1789
1790
int hci_dev_open(__u16 dev);
1791
int hci_dev_close(__u16 dev);
1792
int hci_dev_do_close(struct hci_dev *hdev);
1793
int hci_dev_reset(__u16 dev);
1794
int hci_dev_reset_stat(__u16 dev);
1795
int hci_dev_cmd(unsigned int cmd, void __user *arg);
1796
int hci_get_dev_list(void __user *arg);
1797
int hci_get_dev_info(void __user *arg);
1798
int hci_get_conn_list(void __user *arg);
1799
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
1800
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
1801
int hci_inquiry(void __user *arg);
1802
1803
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
1804
bdaddr_t *bdaddr, u8 type);
1805
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
1806
struct list_head *list, bdaddr_t *bdaddr,
1807
u8 type);
1808
struct bdaddr_list_with_flags *
1809
hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
1810
u8 type);
1811
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
1812
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
1813
u8 type, u8 *peer_irk, u8 *local_irk);
1814
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
1815
u8 type, u32 flags);
1816
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
1817
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
1818
u8 type);
1819
void hci_bdaddr_list_clear(struct list_head *list);
1820
1821
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
1822
bdaddr_t *addr, u8 addr_type);
1823
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
1824
bdaddr_t *addr, u8 addr_type);
1825
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
1826
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
1827
void hci_conn_params_free(struct hci_conn_params *param);
1828
1829
void hci_pend_le_list_del_init(struct hci_conn_params *param);
1830
void hci_pend_le_list_add(struct hci_conn_params *param,
1831
struct list_head *list);
1832
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
1833
bdaddr_t *addr,
1834
u8 addr_type);
1835
1836
void hci_uuids_clear(struct hci_dev *hdev);
1837
1838
void hci_link_keys_clear(struct hci_dev *hdev);
1839
u8 *hci_conn_key_enc_size(struct hci_conn *conn);
1840
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
1841
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1842
bdaddr_t *bdaddr, u8 *val, u8 type,
1843
u8 pin_len, bool *persistent);
1844
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1845
u8 addr_type, u8 type, u8 authenticated,
1846
u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
1847
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1848
u8 addr_type, u8 role);
1849
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
1850
void hci_smp_ltks_clear(struct hci_dev *hdev);
1851
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
1852
1853
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
1854
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1855
u8 addr_type);
1856
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1857
u8 addr_type, u8 val[16], bdaddr_t *rpa);
1858
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
1859
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
1860
void hci_blocked_keys_clear(struct hci_dev *hdev);
1861
void hci_smp_irks_clear(struct hci_dev *hdev);
1862
1863
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1864
1865
void hci_remote_oob_data_clear(struct hci_dev *hdev);
1866
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1867
bdaddr_t *bdaddr, u8 bdaddr_type);
1868
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1869
u8 bdaddr_type, u8 *hash192, u8 *rand192,
1870
u8 *hash256, u8 *rand256);
1871
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1872
u8 bdaddr_type);
1873
1874
void hci_adv_instances_clear(struct hci_dev *hdev);
1875
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
1876
struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid);
1877
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
1878
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1879
u32 flags, u16 adv_data_len, u8 *adv_data,
1880
u16 scan_rsp_len, u8 *scan_rsp_data,
1881
u16 timeout, u16 duration, s8 tx_power,
1882
u32 min_interval, u32 max_interval,
1883
u8 mesh_handle);
1884
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
1885
u32 flags, u8 data_len, u8 *data,
1886
u32 min_interval, u32 max_interval);
1887
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1888
u16 adv_data_len, u8 *adv_data,
1889
u16 scan_rsp_len, u8 *scan_rsp_data);
1890
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
1891
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
1892
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
1893
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
1894
1895
void hci_adv_monitors_clear(struct hci_dev *hdev);
1896
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
1897
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
1898
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle);
1899
int hci_remove_all_adv_monitor(struct hci_dev *hdev);
1900
bool hci_is_adv_monitoring(struct hci_dev *hdev);
1901
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
1902
1903
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
1904
1905
void hci_init_sysfs(struct hci_dev *hdev);
1906
void hci_conn_init_sysfs(struct hci_conn *conn);
1907
void hci_conn_add_sysfs(struct hci_conn *conn);
1908
void hci_conn_del_sysfs(struct hci_conn *conn);
1909
1910
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
1911
#define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)
1912
1913
/* ----- LMP capabilities ----- */
1914
#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
1915
#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
1916
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
1917
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
1918
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
1919
#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO)
1920
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
1921
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
1922
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
1923
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
1924
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
1925
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
1926
#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
1927
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
1928
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
1929
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
1930
#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
1931
#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
1932
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
1933
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
1934
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
1935
#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
1936
#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
1937
#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
1938
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
1939
1940
/* ----- Extended LMP capabilities ----- */
1941
#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
1942
#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
1943
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
1944
#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
1945
#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
1946
#define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING)
1947
1948
/* ----- Host capabilities ----- */
1949
#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
1950
#define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC)
1951
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
1952
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
1953
1954
#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
			      !hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
			       hci_dev_test_flag(dev, HCI_SC_ENABLED))
#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
			!hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
			    !adv->rpa_expired)
#define le_enabled(dev) (lmp_le_capable(dev) && \
			 hci_dev_test_flag(dev, HCI_LE_ENABLED))

#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))

#define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M))

#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))

#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
			       !hci_test_quirk((dev), \
					       HCI_QUIRK_BROKEN_LE_CODED))

#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
			 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))

#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
#define ll_privacy_enabled(dev) (le_enabled(dev) && ll_privacy_capable(dev))

#define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \
				   ((dev)->commands[39] & 0x04))

#define read_key_size_capable(dev) \
	((dev)->commands[20] & 0x10 && \
	 !hci_test_quirk((dev), HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE))

#define read_voice_setting_capable(dev) \
	((dev)->commands[9] & 0x04 && \
	 !hci_test_quirk((dev), HCI_QUIRK_BROKEN_READ_VOICE_SETTING))

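/* Editorial note (not part of the original header): the commands[] array
 * mirrors the Supported Commands bit mask reported by the controller, so
 * "commands[20] & 0x10" in read_key_size_capable() tests octet 20, bit 4,
 * which the Core Specification assigns to HCI_Read_Encryption_Key_Size.
 * A generic, hypothetical helper for such checks might look like this:
 */
static inline bool example_hci_cmd_supported(struct hci_dev *hdev,
					     unsigned int octet,
					     unsigned int bit)
{
	/* One octet of the Supported Commands mask, one bit per command. */
	return hdev->commands[octet] & BIT(bit);
}
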
/* Use enhanced synchronous connection if command is supported and its quirk
 * has not been set.
 */
#define enhanced_sync_conn_capable(dev) \
	(((dev)->commands[29] & 0x08) && \
	 !hci_test_quirk((dev), HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN))

/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
			   ((dev)->commands[37] & 0x40) && \
			   !hci_test_quirk((dev), HCI_QUIRK_BROKEN_EXT_SCAN))

/* Use ext create connection if command is supported */
#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
			   !hci_test_quirk((dev), HCI_QUIRK_BROKEN_EXT_CREATE_CONN))

/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))

/* Maximum advertising length */
#define max_adv_len(dev) \
	(ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)

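/* Editorial illustration (not from the original source): callers that build
 * advertising payloads can use max_adv_len() to stay within what the
 * controller accepts (legacy advertising is much smaller than extended
 * advertising). The helper below is a hypothetical sketch.
 */
static inline u8 example_clamp_adv_len(struct hci_dev *hdev, u16 wanted)
{
	u8 limit = max_adv_len(hdev);

	/* Never hand the controller more AD bytes than it advertises. */
	return wanted > limit ? limit : (u8)wanted;
}
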
/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
 *
 * C24: Mandatory if the LE Controller supports Connection State and either
 * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
 */
#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
					  ext_adv_capable(dev)) && \
					 !hci_test_quirk((dev), \
							 HCI_QUIRK_BROKEN_EXT_CREATE_CONN))

/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))

/* CIS Central/Peripheral and BIS support */
#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
#define iso_enabled(dev) (le_enabled(dev) && iso_capable(dev))
#define cis_capable(dev) \
	(cis_central_capable(dev) || cis_peripheral_capable(dev))
#define cis_enabled(dev) (le_enabled(dev) && cis_capable(dev))
#define cis_central_capable(dev) \
	((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
#define cis_central_enabled(dev) \
	(le_enabled(dev) && cis_central_capable(dev))
#define cis_peripheral_capable(dev) \
	((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
#define cis_peripheral_enabled(dev) \
	(le_enabled(dev) && cis_peripheral_capable(dev))
#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
#define bis_enabled(dev) (le_enabled(dev) && bis_capable(dev))
#define sync_recv_capable(dev) \
	((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
#define sync_recv_enabled(dev) (le_enabled(dev) && sync_recv_capable(dev))

#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
	(!hci_test_quirk((dev), HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG)))

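/* Editorial illustration (not from the original source): the ISO-related
 * capability macros above report which isochronous roles the controller
 * implements. A hypothetical debug helper could summarise them like this:
 */
static inline void example_log_iso_roles(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "CIS central %d, CIS peripheral %d, BIS %d, BIS sync %d",
		   !!cis_central_capable(hdev), !!cis_peripheral_capable(hdev),
		   !!bis_capable(hdev), !!sync_recv_capable(hdev));
}
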
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type, __u8 *flags)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr, flags);

	case CIS_LINK:
	case BIS_LINK:
	case PA_LINK:
		return iso_connect_ind(hdev, bdaddr, flags);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*connect_cfm) (struct hci_conn *conn, __u8 status);
	void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
	void (*security_cfm) (struct hci_conn *conn, __u8 status,
			      __u8 encrypt);
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};

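/* Editorial illustration (not from the original source): an upper-layer
 * protocol module (a .c file, not this header) hooks into connection
 * life-cycle events by registering a struct hci_cb with hci_register_cb(),
 * declared further below. A minimal, hypothetical sketch:
 *
 *	static void example_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		if (!status)
 *			pr_debug("example: connection established\n");
 *	}
 *
 *	static struct hci_cb example_cb = {
 *		.name		= "example",
 *		.connect_cfm	= example_connect_cfm,
 *	};
 *
 *	// in module init / exit:
 *	//	hci_register_cb(&example_cb);
 *	//	hci_unregister_cb(&example_cb);
 */
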
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->disconn_cfm)
			cb->disconn_cfm(conn, reason);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

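/* Editorial note (not part of the original header): the encrypt value that
 * hci_auth_cfm() above and hci_encrypt_cfm() below pass to ->security_cfm()
 * is derived from the connection flags: 0x00 means the link is not
 * encrypted, 0x01 means it is encrypted, and 0x02 means it is encrypted
 * with AES-CCM.
 */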
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (conn->state == BT_CONFIG) {
		if (!status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
		return;
	}

	if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		encrypt = 0x00;
	else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
		encrypt = 0x02;
	else
		encrypt = 0x01;

	if (!status) {
		if (conn->sec_level == BT_SECURITY_SDP)
			conn->sec_level = BT_SECURITY_LOW;

		if (conn->pending_sec_level > conn->sec_level)
			conn->sec_level = conn->pending_sec_level;
	}

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	mutex_unlock(&hci_cb_list_lock);
}

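/* Editorial note (not part of the original header): for LE random addresses
 * the two most significant bits select the sub-type, which is what the
 * helpers below test on bdaddr->b[5]: 0b01 (0x40) marks a resolvable
 * private address and 0b11 (0xc0) marks a static random address.
 */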
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
	if (addr_type != ADDR_LE_DEV_RANDOM)
		return false;

	if ((bdaddr->b[5] & 0xc0) == 0x40)
		return true;

	return false;
}

static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 addr_type)
{
	if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
		return NULL;

	return hci_find_irk_by_rpa(hdev, bdaddr);
}

static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
					u16 to_multiplier)
{
	u16 max_latency;

	if (min > max) {
		BT_WARN("min %d > max %d", min, max);
		return -EINVAL;
	}

	if (min < 6) {
		BT_WARN("min %d < 6", min);
		return -EINVAL;
	}

	if (max > 3200) {
		BT_WARN("max %d > 3200", max);
		return -EINVAL;
	}

	if (to_multiplier < 10) {
		BT_WARN("to_multiplier %d < 10", to_multiplier);
		return -EINVAL;
	}

	if (to_multiplier > 3200) {
		BT_WARN("to_multiplier %d > 3200", to_multiplier);
		return -EINVAL;
	}

	if (max >= to_multiplier * 8) {
		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
		return -EINVAL;
	}

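	/* Editorial note (not part of the original source): with the
	 * supervision timeout in 10 ms units and the interval maximum in
	 * 1.25 ms units, this appears to encode the Core Specification
	 * rule timeout >= (1 + latency) * 2 * interval, which reduces to
	 * latency <= (to_multiplier * 4 / max) - 1, the bound computed
	 * below.
	 */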
	max_latency = (to_multiplier * 4 / max) - 1;
	if (latency > 499) {
		BT_WARN("latency %d > 499", latency);
		return -EINVAL;
	}

	if (latency > max_latency) {
		BT_WARN("latency %d > max_latency %d", latency, max_latency);
		return -EINVAL;
	}

	return 0;
}

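/* Editorial illustration (not from the original source): for example, a
 * request of min = 24 (30 ms), max = 40 (50 ms), latency = 0 and
 * to_multiplier = 42 (420 ms) passes every check above: max_latency is
 * 42 * 4 / 40 - 1 = 3, so any latency up to 3 would be accepted and
 * hci_check_conn_params() returns 0.
 */
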
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);

u32 hci_conn_get_phy(struct hci_conn *conn);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

#define HCI_MGMT_VAR_LEN BIT(0)
#define HCI_MGMT_NO_HDEV BIT(1)
#define HCI_MGMT_UNTRUSTED BIT(2)
#define HCI_MGMT_UNCONFIGURED BIT(3)
#define HCI_MGMT_HDEV_OPTIONAL BIT(4)

struct hci_mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	size_t data_len;
	unsigned long flags;
};

struct hci_mgmt_chan {
	struct list_head list;
	unsigned short channel;
	size_t handler_count;
	const struct hci_mgmt_handler *handlers;
	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
};

int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);

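/* Editorial illustration (not from the original source): a management
 * channel implementation (in a .c file) fills in a handler table and
 * registers it with hci_mgmt_chan_register(). A hypothetical sketch,
 * with example_cmd standing in for a real handler:
 *
 *	static const struct hci_mgmt_handler example_handlers[] = {
 *		{ .func = example_cmd, .data_len = 0,
 *		  .flags = HCI_MGMT_VAR_LEN },
 *	};
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 */
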
/* Management interface */
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
			BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
				 BIT(BDADDR_LE_PUBLIC) | \
				 BIT(BDADDR_LE_RANDOM))

/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
#define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */
#define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */
#define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */
#define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */
#define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */
#define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */
#define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */
#define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */
#define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */
#define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */
#define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */
#define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */
#define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */
#define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */
#define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */
#define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */
#define DISCOV_LE_TIMEOUT 10240 /* msec */
#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */
#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */
#define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */
#define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */
#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)

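/* Editorial note (not part of the original header): the scan interval and
 * window values above are in HCI units of 0.625 ms, which is what
 * INTERVAL_TO_MS() converts, e.g. INTERVAL_TO_MS(0x0012) = 11 ms (11.25 ms
 * truncated) and INTERVAL_TO_MS(0x0800) = 1280 ms; the periodic advertising
 * interval values use 1.25 ms units, hence 0x00A0 = 200 msec there.
 */
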
#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */

void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
void mgmt_power_on(struct hci_dev *hdev, int err);
void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status);
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
			 u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status);
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
void mgmt_suspending(struct hci_dev *hdev, u8 state);
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
			    u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type);

int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size);

void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK 0x0003
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003

#define LOCAL_CODEC_ACL_MASK BIT(0)
#define LOCAL_CODEC_SCO_MASK BIT(1)

#define TRANSPORT_TYPE_MAX 0x04

#endif /* __HCI_CORE_H */