Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/mgmt_util.c
29266 views
1
/*
2
BlueZ - Bluetooth protocol stack for Linux
3
4
Copyright (C) 2015 Intel Corporation
5
6
This program is free software; you can redistribute it and/or modify
7
it under the terms of the GNU General Public License version 2 as
8
published by the Free Software Foundation;
9
10
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21
SOFTWARE IS DISCLAIMED.
22
*/
23
24
#include <linux/unaligned.h>
25
26
#include <net/bluetooth/bluetooth.h>
27
#include <net/bluetooth/hci_core.h>
28
#include <net/bluetooth/hci_mon.h>
29
#include <net/bluetooth/mgmt.h>
30
31
#include "mgmt_util.h"
32
33
/* Build an HCI monitor (btmon) control-event skb mirroring a mgmt
 * event.  Resulting layout: [hci_mon_hdr][cookie:le32][opcode:le16][payload].
 *
 * @index:  controller index, already in little-endian form
 * @cookie: identifies the originating control socket
 * @opcode: mgmt event opcode
 * @len:    payload length in bytes
 * @buf:    payload, may be NULL
 *
 * Returns the new skb, or NULL on allocation failure.  GFP_ATOMIC
 * because callers may be in non-sleepable context.
 */
static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
						 u16 opcode, u16 len, void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* 6 = 4-byte cookie + 2-byte opcode.  NOTE(review): the later
	 * skb_push() of HCI_MON_HDR_SIZE relies on bt_skb_alloc()
	 * reserving sufficient headroom — confirm against bt_skb_alloc().
	 */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	/* Timestamp now so callers can copy the same time onto the
	 * mgmt skb (see mgmt_cmd_status()/mgmt_cmd_complete()).
	 */
	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
	hdr->index = index;
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
58
59
/* Allocate an skb for a mgmt event, leaving headroom for the mgmt
 * header and stashing @hdev and @opcode in the skb control buffer so
 * mgmt_send_event_skb() can build the header later.
 *
 * Returns the skb, or NULL on allocation failure.
 */
struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
			       unsigned int size)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct mgmt_hdr) + size,
					GFP_KERNEL);

	if (!skb)
		return NULL;

	/* Header is pushed into this headroom just before sending */
	skb_reserve(skb, sizeof(struct mgmt_hdr));

	bt_cb(skb)->mgmt.hdev = hdev;
	bt_cb(skb)->mgmt.opcode = opcode;

	return skb;
}
74
75
/* Send a mgmt event skb (built via mgmt_alloc_skb()) to all sockets
 * bound to @channel except @skip_sk, mirroring the raw payload to the
 * HCI monitor first.
 *
 * Consumes @skb on every path except skb == NULL.
 * Returns 0 on success or -EINVAL if @skb is NULL.
 */
int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
			struct sock *skip_sk)
{
	struct hci_dev *hdev;
	struct mgmt_hdr *hdr;
	int len;

	if (!skb)
		return -EINVAL;

	/* Capture the payload length before the header push below
	 * changes skb->len.
	 */
	len = skb->len;
	hdev = bt_cb(skb)->mgmt.hdev;

	/* Time stamp */
	__net_timestamp(skb);

	/* Send just the data, without headers, to the monitor */
	if (channel == HCI_CHANNEL_CONTROL)
		hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
					    skb->data, skb->len,
					    skb_get_ktime(skb), flag, skip_sk);

	/* Prepend the mgmt header into the headroom that
	 * mgmt_alloc_skb() reserved.
	 */
	hdr = skb_push(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(len);

	hci_send_to_channel(channel, skb, flag, skip_sk);

	kfree_skb(skb);
	return 0;
}
110
111
/* Build a mgmt event skb from a flat buffer and broadcast it on
 * @channel.  Convenience wrapper around mgmt_alloc_skb() +
 * mgmt_send_event_skb(); the latter consumes the skb.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
		    void *data, u16 data_len, int flag, struct sock *skip_sk)
{
	struct sk_buff *skb = mgmt_alloc_skb(hdev, event, data_len);

	if (!skb)
		return -ENOMEM;

	if (data)
		skb_put_data(skb, data, data_len);

	return mgmt_send_event_skb(channel, skb, flag, skip_sk);
}
125
126
/* Queue a MGMT_EV_CMD_STATUS event on @sk's receive queue and mirror
 * it to the HCI monitor channel.
 *
 * @index:  controller index placed in the event header
 * @cmd:    the mgmt command being answered
 * @status: mgmt status code to report
 *
 * Returns 0 on success or a negative errno from sock_queue_rcv_skb()
 * (or -ENOMEM if the event skb cannot be allocated).
 */
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* Build the monitor copy first so both skbs can carry the same
	 * timestamp; fall back to stamping the event skb directly.
	 */
	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_STATUS, sizeof(*ev), ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	/* Monitor delivery is best-effort and does not affect err */
	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}
168
169
/* Queue a MGMT_EV_CMD_COMPLETE event, carrying @rp_len bytes of
 * response parameters, on @sk's receive queue and mirror it to the
 * HCI monitor channel.
 *
 * @index:  controller index placed in the event header
 * @cmd:    the mgmt command being answered
 * @status: mgmt status code
 * @rp:     response parameters, copied after the fixed event fields
 *          when non-NULL
 * @rp_len: length of @rp in bytes
 *
 * Returns 0 on success or a negative errno from sock_queue_rcv_skb()
 * (or -ENOMEM if the event skb cannot be allocated).
 */
int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
		      void *rp, size_t rp_len)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* Fixed event fields plus the variable-length response area */
	ev = skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* Build the monitor copy first so both skbs can carry the same
	 * timestamp; fall back to stamping the event skb directly.
	 */
	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_COMPLETE,
					 sizeof(*ev) + rp_len, ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	/* Monitor delivery is best-effort and does not affect err */
	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}
216
217
/* Look up the first pending command matching @opcode whose socket is
 * bound to @channel.  The lookup runs under mgmt_pending_lock, but the
 * returned pointer is not reference-counted, so the entry may be
 * removed by another context after the lock drops.
 *
 * Returns the matching command or NULL.
 */
struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
					   struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd, *tmp, *found = NULL;

	mutex_lock(&hdev->mgmt_pending_lock);

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (hci_sock_get_channel(cmd->sk) == channel &&
		    cmd->opcode == opcode) {
			found = cmd;
			break;
		}
	}

	mutex_unlock(&hdev->mgmt_pending_lock);

	return found;
}
238
239
/* Invoke @cb on every pending command whose opcode matches @opcode
 * (an @opcode of 0 matches every entry).
 *
 * When @remove is true each matched entry is unlinked before @cb runs
 * and freed right after it returns, so @cb must not free or re-list
 * the command itself.  The whole walk holds mgmt_pending_lock, so @cb
 * must not attempt to take it.
 */
void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove,
			  void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
			  void *data)
{
	struct mgmt_pending_cmd *cmd, *tmp;

	mutex_lock(&hdev->mgmt_pending_lock);

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		if (remove)
			list_del(&cmd->list);

		cb(cmd, data);

		if (remove)
			mgmt_pending_free(cmd);
	}

	mutex_unlock(&hdev->mgmt_pending_lock);
}
262
263
/* Allocate a pending command for @opcode, copying @len bytes of @data
 * as its parameters and taking a reference on @sk (dropped in
 * mgmt_pending_free()).  The entry is NOT added to any list; see
 * mgmt_pending_add() for that.
 *
 * Returns the new command, or NULL on allocation failure.
 */
struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->hdev = hdev;

	/* Keep a private copy of the command parameters */
	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}
	cmd->param_len = len;

	/* Pin the socket for the lifetime of the pending command */
	sock_hold(sk);
	cmd->sk = sk;

	return cmd;
}
289
290
/* Allocate a pending command via mgmt_pending_new() and link it onto
 * @hdev's pending list under mgmt_pending_lock.
 *
 * Returns the new command, or NULL on allocation failure.
 */
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd = mgmt_pending_new(sk, opcode, hdev,
							data, len);

	if (cmd) {
		/* Publish under the lock so find/foreach see a
		 * fully-initialized entry.
		 */
		mutex_lock(&hdev->mgmt_pending_lock);
		list_add_tail(&cmd->list, &hdev->mgmt_pending);
		mutex_unlock(&hdev->mgmt_pending_lock);
	}

	return cmd;
}
306
307
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_new() and free the parameter copy and the command
 * itself.  Callers in this file unlink the entry from the pending
 * list before calling this.
 */
void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
313
314
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
315
{
316
mutex_lock(&cmd->hdev->mgmt_pending_lock);
317
list_del(&cmd->list);
318
mutex_unlock(&cmd->hdev->mgmt_pending_lock);
319
320
mgmt_pending_free(cmd);
321
}
322
323
bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
324
{
325
struct mgmt_pending_cmd *tmp;
326
327
lockdep_assert_held(&hdev->mgmt_pending_lock);
328
329
if (!cmd)
330
return false;
331
332
list_for_each_entry(tmp, &hdev->mgmt_pending, list) {
333
if (cmd == tmp)
334
return true;
335
}
336
337
return false;
338
}
339
340
bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
341
{
342
bool listed;
343
344
mutex_lock(&hdev->mgmt_pending_lock);
345
listed = __mgmt_pending_listed(hdev, cmd);
346
mutex_unlock(&hdev->mgmt_pending_lock);
347
348
return listed;
349
}
350
351
bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
352
{
353
bool listed;
354
355
if (!cmd)
356
return false;
357
358
mutex_lock(&hdev->mgmt_pending_lock);
359
360
listed = __mgmt_pending_listed(hdev, cmd);
361
if (listed)
362
list_del(&cmd->list);
363
364
mutex_unlock(&hdev->mgmt_pending_lock);
365
366
return listed;
367
}
368
369
void mgmt_mesh_foreach(struct hci_dev *hdev,
370
void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
371
void *data, struct sock *sk)
372
{
373
struct mgmt_mesh_tx *mesh_tx, *tmp;
374
375
list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) {
376
if (!sk || mesh_tx->sk == sk)
377
cb(mesh_tx, data);
378
}
379
}
380
381
struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk)
382
{
383
struct mgmt_mesh_tx *mesh_tx;
384
385
if (list_empty(&hdev->mesh_pending))
386
return NULL;
387
388
list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
389
if (!sk || mesh_tx->sk == sk)
390
return mesh_tx;
391
}
392
393
return NULL;
394
}
395
396
/* Look up a pending mesh transmit entry by its send @handle.
 * Returns the matching entry or NULL.
 */
struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle)
{
	struct mgmt_mesh_tx *tx;

	list_for_each_entry(tx, &hdev->mesh_pending, list) {
		if (tx->handle == handle)
			return tx;
	}

	return NULL;
}
410
411
/* Allocate and queue a pending mesh transmit entry for @hdev.
 *
 * The send handle comes from a wrapping counter that skips zero, so a
 * handle of 0 can serve as "invalid".  @len bytes of @data are copied
 * into the entry's param buffer — NOTE(review): no bounds check here;
 * callers must guarantee @len fits mesh_tx->param.  A reference is
 * taken on @sk and dropped in mgmt_mesh_remove().
 *
 * Returns the new entry, or NULL on allocation failure.
 */
struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;

	mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL);
	if (!mesh_tx)
		return NULL;

	/* Advance the handle counter, skipping 0 on wraparound */
	hdev->mesh_send_ref++;
	if (!hdev->mesh_send_ref)
		hdev->mesh_send_ref++;

	mesh_tx->handle = hdev->mesh_send_ref;
	mesh_tx->index = hdev->id;
	memcpy(mesh_tx->param, data, len);
	mesh_tx->param_len = len;
	mesh_tx->sk = sk;
	sock_hold(sk);

	list_add_tail(&mesh_tx->list, &hdev->mesh_pending);

	return mesh_tx;
}
435
436
/* Unlink a pending mesh transmit entry and free it, dropping the
 * socket reference taken in mgmt_mesh_add().
 */
void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx)
{
	list_del(&mesh_tx->list);
	sock_put(mesh_tx->sk);
	kfree(mesh_tx);
}
442
443