Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/bluetooth/btintel_pcie.c
29266 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
*
4
* Bluetooth support for Intel PCIe devices
5
*
6
* Copyright (C) 2024 Intel Corporation
7
*/
8
9
#include <linux/kernel.h>
10
#include <linux/module.h>
11
#include <linux/firmware.h>
12
#include <linux/pci.h>
13
#include <linux/wait.h>
14
#include <linux/delay.h>
15
#include <linux/interrupt.h>
16
17
#include <linux/unaligned.h>
18
#include <linux/devcoredump.h>
19
20
#include <net/bluetooth/bluetooth.h>
21
#include <net/bluetooth/hci_core.h>
22
23
#include "btintel.h"
24
#include "btintel_pcie.h"
25
26
#define VERSION "0.1"
27
28
#define BTINTEL_PCI_DEVICE(dev, subdev) \
29
.vendor = PCI_VENDOR_ID_INTEL, \
30
.device = (dev), \
31
.subvendor = PCI_ANY_ID, \
32
.subdevice = (subdev), \
33
.driver_data = 0
34
35
#define POLL_INTERVAL_US 10
36
37
/* Intel Bluetooth PCIe device id table */
38
static const struct pci_device_id btintel_pcie_table[] = {
39
/* BlazarI, Wildcat Lake */
40
{ BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
41
/* BlazarI, Lunar Lake */
42
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
43
/* Scorpious, Panther Lake-H484 */
44
{ BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
45
/* Scorpious, Panther Lake-H404 */
46
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
47
{ 0 }
48
};
49
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
50
51
struct btintel_pcie_dev_recovery {
52
struct list_head list;
53
u8 count;
54
time64_t last_error;
55
char name[];
56
};
57
58
/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
59
#define BTINTEL_PCIE_HCI_TYPE_LEN 4
60
#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
61
#define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
62
#define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
63
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
64
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
65
66
#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
67
68
#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
69
#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
70
71
#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
72
#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
73
74
#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
75
76
#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
77
#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
78
79
#define BTINTEL_PCIE_RESET_WINDOW_SECS 5
80
#define BTINTEL_PCIE_FLR_MAX_RETRY 1
81
82
/* Alive interrupt context */
83
enum {
84
BTINTEL_PCIE_ROM,
85
BTINTEL_PCIE_FW_DL,
86
BTINTEL_PCIE_HCI_RESET,
87
BTINTEL_PCIE_INTEL_HCI_RESET1,
88
BTINTEL_PCIE_INTEL_HCI_RESET2,
89
BTINTEL_PCIE_D0,
90
BTINTEL_PCIE_D3
91
};
92
93
/* Structure for dbgc fragment buffer
94
* @buf_addr_lsb: LSB of the buffer's physical address
95
* @buf_addr_msb: MSB of the buffer's physical address
96
* @buf_size: Total size of the buffer
97
*/
98
struct btintel_pcie_dbgc_ctxt_buf {
99
u32 buf_addr_lsb;
100
u32 buf_addr_msb;
101
u32 buf_size;
102
};
103
104
/* Structure for dbgc fragment
105
* @magic_num: 0XA5A5A5A5
106
* @ver: For Driver-FW compatibility
107
* @total_size: Total size of the payload debug info
108
* @num_buf: Num of allocated debug bufs
109
* @bufs: All buffer's addresses and sizes
110
*/
111
struct btintel_pcie_dbgc_ctxt {
112
u32 magic_num;
113
u32 ver;
114
u32 total_size;
115
u32 num_buf;
116
struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
117
};
118
119
struct btintel_pcie_removal {
120
struct pci_dev *pdev;
121
struct work_struct work;
122
};
123
124
static LIST_HEAD(btintel_pcie_recovery_list);
125
static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
126
127
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
128
{
129
switch (alive_intr_ctxt) {
130
case BTINTEL_PCIE_ROM:
131
return "rom";
132
case BTINTEL_PCIE_FW_DL:
133
return "fw_dl";
134
case BTINTEL_PCIE_D0:
135
return "d0";
136
case BTINTEL_PCIE_D3:
137
return "d3";
138
case BTINTEL_PCIE_HCI_RESET:
139
return "hci_reset";
140
case BTINTEL_PCIE_INTEL_HCI_RESET1:
141
return "intel_reset1";
142
case BTINTEL_PCIE_INTEL_HCI_RESET2:
143
return "intel_reset2";
144
default:
145
return "unknown";
146
}
147
}
148
149
/* This function initializes the memory for DBGC buffers and formats the
150
* DBGC fragment which consists header info and DBGC buffer's LSB, MSB and
151
* size as the payload
152
*/
153
static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
154
{
155
struct btintel_pcie_dbgc_ctxt db_frag;
156
struct data_buf *buf;
157
int i;
158
159
data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
160
data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
161
sizeof(*buf), GFP_KERNEL);
162
if (!data->dbgc.bufs)
163
return -ENOMEM;
164
165
data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
166
data->dbgc.count *
167
BTINTEL_PCIE_DBGC_BUFFER_SIZE,
168
&data->dbgc.buf_p_addr,
169
GFP_KERNEL | __GFP_NOWARN);
170
if (!data->dbgc.buf_v_addr)
171
return -ENOMEM;
172
173
data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
174
sizeof(struct btintel_pcie_dbgc_ctxt),
175
&data->dbgc.frag_p_addr,
176
GFP_KERNEL | __GFP_NOWARN);
177
if (!data->dbgc.frag_v_addr)
178
return -ENOMEM;
179
180
data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
181
182
db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
183
db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
184
db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
185
db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
186
187
for (i = 0; i < data->dbgc.count; i++) {
188
buf = &data->dbgc.bufs[i];
189
buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
190
buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
191
db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
192
db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
193
db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
194
}
195
196
memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
197
return 0;
198
}
199
200
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
201
u16 queue_num)
202
{
203
bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
204
queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
205
ia->tr_hia[queue_num], ia->tr_tia[queue_num],
206
ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
207
}
208
209
static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
210
u16 index)
211
{
212
bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
213
index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
214
}
215
216
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
217
{
218
u8 queue = entry->entry;
219
struct msix_entry *entries = entry - queue;
220
221
return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
222
}
223
224
/* Set the doorbell for TXQ to notify the device that @index (actually index-1)
225
* of the TFD is updated and ready to transmit.
226
*/
227
static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
228
{
229
u32 val;
230
231
val = index;
232
val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
233
234
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
235
}
236
237
/* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
238
* descriptor) with the data length and the DMA address of the data buffer.
239
*/
240
static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
241
struct sk_buff *skb)
242
{
243
struct data_buf *buf;
244
struct tfd *tfd;
245
246
tfd = &txq->tfds[tfd_index];
247
memset(tfd, 0, sizeof(*tfd));
248
249
buf = &txq->bufs[tfd_index];
250
251
tfd->size = skb->len;
252
tfd->addr = buf->data_p_addr;
253
254
/* Copy the outgoing data to DMA buffer */
255
memcpy(buf->data, skb->data, tfd->size);
256
}
257
258
static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
259
{
260
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
261
u16 cr_hia, cr_tia;
262
u32 reg, mbox_reg;
263
struct sk_buff *skb;
264
u8 buf[80];
265
266
skb = alloc_skb(1024, GFP_ATOMIC);
267
if (!skb)
268
return;
269
270
snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
271
bt_dev_dbg(hdev, "%s", buf);
272
skb_put_data(skb, buf, strlen(buf));
273
274
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
275
snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
276
bt_dev_dbg(hdev, "%s", buf);
277
skb_put_data(skb, buf, strlen(buf));
278
data->boot_stage_cache = reg;
279
280
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
281
snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
282
skb_put_data(skb, buf, strlen(buf));
283
bt_dev_dbg(hdev, "%s", buf);
284
285
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
286
snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
287
skb_put_data(skb, buf, strlen(buf));
288
bt_dev_dbg(hdev, "%s", buf);
289
290
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
291
snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
292
skb_put_data(skb, buf, strlen(buf));
293
bt_dev_dbg(hdev, "%s", buf);
294
295
/*Read the Mail box status and registers*/
296
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
297
snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
298
skb_put_data(skb, buf, strlen(buf));
299
if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
300
mbox_reg = btintel_pcie_rd_reg32(data,
301
BTINTEL_PCIE_CSR_MBOX_1_REG);
302
snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
303
skb_put_data(skb, buf, strlen(buf));
304
bt_dev_dbg(hdev, "%s", buf);
305
}
306
307
if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
308
mbox_reg = btintel_pcie_rd_reg32(data,
309
BTINTEL_PCIE_CSR_MBOX_2_REG);
310
snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
311
skb_put_data(skb, buf, strlen(buf));
312
bt_dev_dbg(hdev, "%s", buf);
313
}
314
315
if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
316
mbox_reg = btintel_pcie_rd_reg32(data,
317
BTINTEL_PCIE_CSR_MBOX_3_REG);
318
snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
319
skb_put_data(skb, buf, strlen(buf));
320
bt_dev_dbg(hdev, "%s", buf);
321
}
322
323
if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
324
mbox_reg = btintel_pcie_rd_reg32(data,
325
BTINTEL_PCIE_CSR_MBOX_4_REG);
326
snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
327
skb_put_data(skb, buf, strlen(buf));
328
bt_dev_dbg(hdev, "%s", buf);
329
}
330
331
cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
332
cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
333
snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
334
skb_put_data(skb, buf, strlen(buf));
335
bt_dev_dbg(hdev, "%s", buf);
336
337
cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
338
cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
339
snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
340
skb_put_data(skb, buf, strlen(buf));
341
bt_dev_dbg(hdev, "%s", buf);
342
snprintf(buf, sizeof(buf), "--------------------------------");
343
bt_dev_dbg(hdev, "%s", buf);
344
345
hci_recv_diag(hdev, skb);
346
}
347
348
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
349
struct sk_buff *skb, u32 pkt_type, u16 opcode)
350
{
351
int ret;
352
u16 tfd_index;
353
u32 old_ctxt;
354
bool wait_on_alive = false;
355
struct hci_dev *hdev = data->hdev;
356
357
struct txq *txq = &data->txq;
358
359
tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
360
361
if (tfd_index > txq->count)
362
return -ERANGE;
363
364
/* Firmware raises alive interrupt on HCI_OP_RESET or
365
* BTINTEL_HCI_OP_RESET
366
*/
367
wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
368
(opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
369
370
if (wait_on_alive) {
371
data->gp0_received = false;
372
old_ctxt = data->alive_intr_ctxt;
373
data->alive_intr_ctxt =
374
(opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
375
BTINTEL_PCIE_HCI_RESET);
376
bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
377
opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
378
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
379
}
380
381
memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
382
BTINTEL_PCIE_HCI_TYPE_LEN);
383
384
/* Prepare for TX. It updates the TFD with the length of data and
385
* address of the DMA buffer, and copy the data to the DMA buffer
386
*/
387
btintel_pcie_prepare_tx(txq, tfd_index, skb);
388
389
tfd_index = (tfd_index + 1) % txq->count;
390
data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
391
392
/* Arm wait event condition */
393
data->tx_wait_done = false;
394
395
/* Set the doorbell to notify the device */
396
btintel_pcie_set_tx_db(data, tfd_index);
397
398
/* Wait for the complete interrupt - URBD0 */
399
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
400
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
401
if (!ret) {
402
bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
403
BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
404
btintel_pcie_dump_debug_registers(data->hdev);
405
return -ETIME;
406
}
407
408
if (wait_on_alive) {
409
ret = wait_event_timeout(data->gp0_wait_q,
410
data->gp0_received,
411
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
412
if (!ret) {
413
hdev->stat.err_tx++;
414
bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
415
BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
416
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
417
return -ETIME;
418
}
419
}
420
return 0;
421
}
422
423
/* Set the doorbell for RXQ to notify the device that @index (actually index-1)
424
* is available to receive the data
425
*/
426
static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
427
{
428
u32 val;
429
430
val = index;
431
val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
432
433
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
434
}
435
436
/* Update the FRBD (free buffer descriptor) with the @frbd_index and the
437
* DMA address of the free buffer.
438
*/
439
static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
440
{
441
struct data_buf *buf;
442
struct frbd *frbd;
443
444
/* Get the buffer of the FRBD for DMA */
445
buf = &rxq->bufs[frbd_index];
446
447
frbd = &rxq->frbds[frbd_index];
448
memset(frbd, 0, sizeof(*frbd));
449
450
/* Update FRBD */
451
frbd->tag = frbd_index;
452
frbd->addr = buf->data_p_addr;
453
}
454
455
static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
456
{
457
u16 frbd_index;
458
struct rxq *rxq = &data->rxq;
459
460
frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
461
462
if (frbd_index > rxq->count)
463
return -ERANGE;
464
465
/* Prepare for RX submit. It updates the FRBD with the address of DMA
466
* buffer
467
*/
468
btintel_pcie_prepare_rx(rxq, frbd_index);
469
470
frbd_index = (frbd_index + 1) % rxq->count;
471
data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
472
ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
473
474
/* Set the doorbell to notify the device */
475
btintel_pcie_set_rx_db(data, frbd_index);
476
477
return 0;
478
}
479
480
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
481
{
482
int i, ret;
483
struct rxq *rxq = &data->rxq;
484
485
/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to overcome the
486
* hardware issues leading to race condition at the firmware.
487
*/
488
489
for (i = 0; i < rxq->count - 3; i++) {
490
ret = btintel_pcie_submit_rx(data);
491
if (ret)
492
return ret;
493
}
494
495
return 0;
496
}
497
498
static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
499
{
500
memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
501
memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
502
memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
503
memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
504
}
505
506
static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
507
{
508
u32 reg;
509
int retry = 3;
510
511
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
512
513
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
514
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
515
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
516
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
517
518
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
519
520
do {
521
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
522
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
523
break;
524
usleep_range(10000, 12000);
525
526
} while (--retry > 0);
527
usleep_range(10000, 12000);
528
529
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
530
531
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
532
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
533
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
534
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
535
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
536
usleep_range(10000, 12000);
537
538
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
539
bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
540
541
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
542
543
/* If shared hardware reset is success then boot stage register shall be
544
* set to 0
545
*/
546
return reg == 0 ? 0 : -ENODEV;
547
}
548
549
static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
550
{
551
u32 reg;
552
553
/* Set MAC_INIT bit to start primary bootloader */
554
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
555
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
556
BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
557
BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
558
reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
559
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
560
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
561
}
562
563
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
564
{
565
u32 reg;
566
int retry = 15;
567
568
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
569
570
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
571
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
572
if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
573
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
574
575
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
576
577
do {
578
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
579
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
580
return 0;
581
/* Need delay here for Target Access harwdware to settle down*/
582
usleep_range(1000, 1200);
583
584
} while (--retry > 0);
585
586
return -ETIME;
587
}
588
589
static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
590
{
591
u32 reg;
592
593
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
594
595
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
596
reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
597
598
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
599
reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
600
601
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
602
reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
603
604
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
605
}
606
607
static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
608
void *data, size_t size)
609
{
610
struct intel_tlv *tlv;
611
612
tlv = dest;
613
tlv->type = type;
614
tlv->len = size;
615
memcpy(tlv->val, data, tlv->len);
616
return dest + sizeof(*tlv) + size;
617
}
618
619
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
620
{
621
u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
622
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
623
struct hci_dev *hdev = data->hdev;
624
u8 *pdata, *p, buf_idx;
625
struct intel_tlv *tlv;
626
struct timespec64 now;
627
struct tm tm_now;
628
char fw_build[128];
629
char ts[128];
630
char vendor[64];
631
char driver[64];
632
633
if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
634
return -EOPNOTSUPP;
635
636
637
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
638
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
639
640
buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
641
if (buf_idx > dbgc->count) {
642
bt_dev_warn(hdev, "Buffer index is invalid");
643
return -EINVAL;
644
}
645
646
prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
647
if (prev_size + offset >= prev_size)
648
data->dmp_hdr.write_ptr = prev_size + offset;
649
else
650
return -EINVAL;
651
652
snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
653
snprintf(driver, sizeof(driver), "Driver: %s\n",
654
data->dmp_hdr.driver_name);
655
656
ktime_get_real_ts64(&now);
657
time64_to_tm(now.tv_sec, 0, &tm_now);
658
snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
659
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
660
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
661
662
snprintf(fw_build, sizeof(fw_build),
663
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
664
2000 + (data->dmp_hdr.fw_timestamp >> 8),
665
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
666
data->dmp_hdr.fw_build_num);
667
668
data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
669
sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
670
sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
671
sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
672
sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
673
sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
674
sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
675
sizeof(*tlv) + strlen(ts) +
676
sizeof(*tlv) + strlen(fw_build) +
677
sizeof(*tlv) + strlen(vendor) +
678
sizeof(*tlv) + strlen(driver);
679
680
/*
681
* sizeof(u32) - signature
682
* sizeof(data_len) - to store tlv data size
683
* data_len - TLV data
684
*/
685
dump_size = sizeof(u32) + sizeof(data_len) + data_len;
686
687
688
/* Add debug buffers data length to dump size */
689
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
690
691
pdata = vmalloc(dump_size);
692
if (!pdata)
693
return -ENOMEM;
694
p = pdata;
695
696
*(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
697
p += sizeof(u32);
698
699
*(u32 *)p = data_len;
700
p += sizeof(u32);
701
702
703
p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
704
p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
705
p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
706
p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
707
strlen(fw_build));
708
p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
709
sizeof(data->dmp_hdr.cnvi_bt));
710
p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
711
sizeof(data->dmp_hdr.write_ptr));
712
p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
713
sizeof(data->dmp_hdr.wrap_ctr));
714
715
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
716
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
717
718
p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
719
sizeof(data->dmp_hdr.trigger_reason));
720
p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
721
sizeof(data->dmp_hdr.fw_git_sha1));
722
p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
723
sizeof(data->dmp_hdr.cnvr_top));
724
p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
725
sizeof(data->dmp_hdr.cnvi_top));
726
727
memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
728
dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
729
return 0;
730
}
731
732
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
733
{
734
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
735
int ret = 0;
736
737
ret = btintel_pcie_get_mac_access(data);
738
if (ret) {
739
bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
740
return;
741
}
742
743
ret = btintel_pcie_read_dram_buffers(data);
744
745
btintel_pcie_release_mac_access(data);
746
747
if (ret)
748
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
749
}
750
751
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
752
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
753
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
754
* Then the host reads firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
755
* from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
756
*/
757
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
758
{
759
int err;
760
u32 reg;
761
762
data->gp0_received = false;
763
764
/* Update the DMA address of CI struct to CSR */
765
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
766
data->ci_p_addr & 0xffffffff);
767
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
768
(u64)data->ci_p_addr >> 32);
769
770
/* Reset the cached value of boot stage. it is updated by the MSI-X
771
* gp0 interrupt handler.
772
*/
773
data->boot_stage_cache = 0x0;
774
775
/* Set MAC_INIT bit to start primary bootloader */
776
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
777
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
778
BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
779
BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
780
reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
781
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
782
783
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
784
785
/* MAC is ready. Enable BT FUNC */
786
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
787
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
788
789
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
790
791
/* wait for interrupt from the device after booting up to primary
792
* bootloader.
793
*/
794
data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
795
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
796
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
797
if (!err)
798
return -ETIME;
799
800
/* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
801
if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
802
return -ENODEV;
803
804
return 0;
805
}
806
807
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
808
{
809
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
810
}
811
812
static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
813
{
814
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
815
!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
816
}
817
818
static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
819
{
820
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
821
}
822
823
static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
824
{
825
return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
826
}
827
828
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
829
u32 dxstate)
830
{
831
bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
832
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
833
}
834
835
static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
836
void *buf, u32 dev_addr, int len)
837
{
838
int err;
839
u32 *val = buf;
840
841
/* Get device mac access */
842
err = btintel_pcie_get_mac_access(data);
843
if (err) {
844
bt_dev_err(data->hdev, "Failed to get mac access %d", err);
845
return err;
846
}
847
848
for (; len > 0; len -= 4, dev_addr += 4, val++)
849
*val = btintel_pcie_rd_dev_mem(data, dev_addr);
850
851
btintel_pcie_release_mac_access(data);
852
853
return 0;
854
}
855
856
static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
857
{
858
return (data->boot_stage_cache &
859
BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
860
(data->boot_stage_cache &
861
BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
862
}
863
864
static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
865
{
866
return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
867
(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
868
}
869
870
static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
871
{
872
bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
873
btintel_pcie_dump_debug_registers(data->hdev);
874
}
875
876
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
877
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
878
*/
879
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
880
{
881
bool submit_rx, signal_waitq;
882
u32 reg, old_ctxt;
883
884
/* This interrupt is for three different causes and it is not easy to
885
* know what causes the interrupt. So, it compares each register value
886
* with cached value and update it before it wake up the queue.
887
*/
888
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
889
if (reg != data->boot_stage_cache)
890
data->boot_stage_cache = reg;
891
892
bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
893
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
894
data->boot_stage_cache, reg);
895
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
896
if (reg != data->img_resp_cache)
897
data->img_resp_cache = reg;
898
899
if (btintel_pcie_in_error(data)) {
900
bt_dev_err(data->hdev, "Controller in error state");
901
btintel_pcie_dump_debug_registers(data->hdev);
902
return;
903
}
904
905
if (btintel_pcie_in_lockdown(data)) {
906
bt_dev_err(data->hdev, "Controller in lockdown state");
907
btintel_pcie_dump_debug_registers(data->hdev);
908
return;
909
}
910
911
data->gp0_received = true;
912
913
old_ctxt = data->alive_intr_ctxt;
914
submit_rx = false;
915
signal_waitq = false;
916
917
switch (data->alive_intr_ctxt) {
918
case BTINTEL_PCIE_ROM:
919
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
920
signal_waitq = true;
921
break;
922
case BTINTEL_PCIE_FW_DL:
923
/* Error case is already handled. Ideally control shall not
924
* reach here
925
*/
926
break;
927
case BTINTEL_PCIE_INTEL_HCI_RESET1:
928
if (btintel_pcie_in_op(data)) {
929
submit_rx = true;
930
signal_waitq = true;
931
break;
932
}
933
934
if (btintel_pcie_in_iml(data)) {
935
submit_rx = true;
936
signal_waitq = true;
937
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
938
break;
939
}
940
break;
941
case BTINTEL_PCIE_INTEL_HCI_RESET2:
942
if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
943
btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
944
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
945
}
946
break;
947
case BTINTEL_PCIE_D0:
948
if (btintel_pcie_in_d3(data)) {
949
data->alive_intr_ctxt = BTINTEL_PCIE_D3;
950
signal_waitq = true;
951
break;
952
}
953
break;
954
case BTINTEL_PCIE_D3:
955
if (btintel_pcie_in_d0(data)) {
956
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
957
submit_rx = true;
958
signal_waitq = true;
959
break;
960
}
961
break;
962
case BTINTEL_PCIE_HCI_RESET:
963
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
964
submit_rx = true;
965
signal_waitq = true;
966
break;
967
default:
968
bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
969
data->alive_intr_ctxt);
970
break;
971
}
972
973
if (submit_rx) {
974
btintel_pcie_reset_ia(data);
975
btintel_pcie_start_rx(data);
976
}
977
978
if (signal_waitq) {
979
bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
980
wake_up(&data->gp0_wait_q);
981
}
982
983
if (old_ctxt != data->alive_intr_ctxt)
984
bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
985
btintel_pcie_alivectxt_state2str(old_ctxt),
986
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
987
}
988
989
/* This function handles the MSX-X interrupt for rx queue 0 which is for TX
990
*/
991
static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
992
{
993
u16 cr_tia, cr_hia;
994
struct txq *txq;
995
struct urbd0 *urbd0;
996
997
cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
998
cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
999
1000
if (cr_tia == cr_hia)
1001
return;
1002
1003
txq = &data->txq;
1004
1005
while (cr_tia != cr_hia) {
1006
data->tx_wait_done = true;
1007
wake_up(&data->tx_wait_q);
1008
1009
urbd0 = &txq->urbd0s[cr_tia];
1010
1011
if (urbd0->tfd_index > txq->count)
1012
return;
1013
1014
cr_tia = (cr_tia + 1) % txq->count;
1015
data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1016
ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1017
}
1018
}
1019
1020
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1021
{
1022
struct hci_event_hdr *hdr = (void *)skb->data;
1023
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1024
1025
if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1026
hdr->plen > 0) {
1027
const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1028
unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1029
1030
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1031
switch (skb->data[2]) {
1032
case 0x02:
1033
/* When switching to the operational firmware
1034
* the device sends a vendor specific event
1035
* indicating that the bootup completed.
1036
*/
1037
btintel_bootup(hdev, ptr, len);
1038
1039
/* If bootup event is from operational image,
1040
* driver needs to write sleep control register to
1041
* move into D0 state
1042
*/
1043
if (btintel_pcie_in_op(data)) {
1044
btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1045
data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1046
kfree_skb(skb);
1047
return 0;
1048
}
1049
1050
if (btintel_pcie_in_iml(data)) {
1051
/* In case of IML, there is no concept
1052
* of D0 transition. Just mimic as if
1053
* IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1054
* bit and waking up the task waiting on
1055
* INTEL_WAIT_FOR_D0. This is required
1056
* as intel_boot() is common function for
1057
* both IML and OP image loading.
1058
*/
1059
if (btintel_test_and_clear_flag(data->hdev,
1060
INTEL_WAIT_FOR_D0))
1061
btintel_wake_up_flag(data->hdev,
1062
INTEL_WAIT_FOR_D0);
1063
}
1064
kfree_skb(skb);
1065
return 0;
1066
case 0x06:
1067
/* When the firmware loading completes the
1068
* device sends out a vendor specific event
1069
* indicating the result of the firmware
1070
* loading.
1071
*/
1072
btintel_secure_send_result(hdev, ptr, len);
1073
kfree_skb(skb);
1074
return 0;
1075
}
1076
}
1077
1078
/* This is a debug event that comes from IML and OP image when it
1079
* starts execution. There is no need pass this event to stack.
1080
*/
1081
if (skb->data[2] == 0x97) {
1082
hci_recv_diag(hdev, skb);
1083
return 0;
1084
}
1085
}
1086
1087
return hci_recv_frame(hdev, skb);
1088
}
1089
/* Process the received rx data
1090
* It check the frame header to identify the data type and create skb
1091
* and calling HCI API
1092
*/
1093
static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
1094
struct sk_buff *skb)
1095
{
1096
int ret;
1097
u8 pkt_type;
1098
u16 plen;
1099
u32 pcie_pkt_type;
1100
void *pdata;
1101
struct hci_dev *hdev = data->hdev;
1102
1103
spin_lock(&data->hci_rx_lock);
1104
1105
/* The first 4 bytes indicates the Intel PCIe specific packet type */
1106
pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1107
if (!pdata) {
1108
bt_dev_err(hdev, "Corrupted packet received");
1109
ret = -EILSEQ;
1110
goto exit_error;
1111
}
1112
1113
pcie_pkt_type = get_unaligned_le32(pdata);
1114
1115
switch (pcie_pkt_type) {
1116
case BTINTEL_PCIE_HCI_ACL_PKT:
1117
if (skb->len >= HCI_ACL_HDR_SIZE) {
1118
plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1119
pkt_type = HCI_ACLDATA_PKT;
1120
} else {
1121
bt_dev_err(hdev, "ACL packet is too short");
1122
ret = -EILSEQ;
1123
goto exit_error;
1124
}
1125
break;
1126
1127
case BTINTEL_PCIE_HCI_SCO_PKT:
1128
if (skb->len >= HCI_SCO_HDR_SIZE) {
1129
plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1130
pkt_type = HCI_SCODATA_PKT;
1131
} else {
1132
bt_dev_err(hdev, "SCO packet is too short");
1133
ret = -EILSEQ;
1134
goto exit_error;
1135
}
1136
break;
1137
1138
case BTINTEL_PCIE_HCI_EVT_PKT:
1139
if (skb->len >= HCI_EVENT_HDR_SIZE) {
1140
plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1141
pkt_type = HCI_EVENT_PKT;
1142
} else {
1143
bt_dev_err(hdev, "Event packet is too short");
1144
ret = -EILSEQ;
1145
goto exit_error;
1146
}
1147
break;
1148
1149
case BTINTEL_PCIE_HCI_ISO_PKT:
1150
if (skb->len >= HCI_ISO_HDR_SIZE) {
1151
plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1152
pkt_type = HCI_ISODATA_PKT;
1153
} else {
1154
bt_dev_err(hdev, "ISO packet is too short");
1155
ret = -EILSEQ;
1156
goto exit_error;
1157
}
1158
break;
1159
1160
default:
1161
bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1162
pcie_pkt_type);
1163
ret = -EINVAL;
1164
goto exit_error;
1165
}
1166
1167
if (skb->len < plen) {
1168
bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1169
pkt_type);
1170
ret = -EILSEQ;
1171
goto exit_error;
1172
}
1173
1174
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1175
1176
hci_skb_pkt_type(skb) = pkt_type;
1177
hdev->stat.byte_rx += plen;
1178
skb_trim(skb, plen);
1179
1180
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1181
ret = btintel_pcie_recv_event(hdev, skb);
1182
else
1183
ret = hci_recv_frame(hdev, skb);
1184
skb = NULL; /* skb is freed in the callee */
1185
1186
exit_error:
1187
if (skb)
1188
kfree_skb(skb);
1189
1190
if (ret)
1191
hdev->stat.err_rx++;
1192
1193
spin_unlock(&data->hci_rx_lock);
1194
1195
return ret;
1196
}
1197
1198
static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1199
{
1200
int len, err, offset, pending;
1201
struct sk_buff *skb;
1202
u8 *buf, prefix[64];
1203
u32 addr, val;
1204
u16 pkt_len;
1205
1206
struct tlv {
1207
u8 type;
1208
__le16 len;
1209
u8 val[];
1210
} __packed;
1211
1212
struct tlv *tlv;
1213
1214
switch (data->dmp_hdr.cnvi_top & 0xfff) {
1215
case BTINTEL_CNVI_BLAZARI:
1216
case BTINTEL_CNVI_BLAZARIW:
1217
/* only from step B0 onwards */
1218
if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1219
return;
1220
len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1221
addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1222
break;
1223
case BTINTEL_CNVI_SCP:
1224
len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1225
addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1226
break;
1227
default:
1228
bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1229
return;
1230
}
1231
1232
buf = kzalloc(len, GFP_KERNEL);
1233
if (!buf)
1234
goto exit_on_error;
1235
1236
btintel_pcie_mac_init(data);
1237
1238
err = btintel_pcie_read_device_mem(data, buf, addr, len);
1239
if (err)
1240
goto exit_on_error;
1241
1242
val = get_unaligned_le32(buf);
1243
if (val != BTINTEL_PCIE_MAGIC_NUM) {
1244
bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1245
val);
1246
goto exit_on_error;
1247
}
1248
1249
snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1250
1251
offset = 4;
1252
do {
1253
pending = len - offset;
1254
if (pending < sizeof(*tlv))
1255
break;
1256
tlv = (struct tlv *)(buf + offset);
1257
1258
/* If type == 0, then there are no more TLVs to be parsed */
1259
if (!tlv->type) {
1260
bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1261
break;
1262
}
1263
pkt_len = le16_to_cpu(tlv->len);
1264
offset += sizeof(*tlv);
1265
pending = len - offset;
1266
if (pkt_len > pending)
1267
break;
1268
1269
offset += pkt_len;
1270
1271
/* Only TLVs of type == 1 are HCI events, no need to process other
1272
* TLVs
1273
*/
1274
if (tlv->type != 1)
1275
continue;
1276
1277
bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1278
if (pkt_len > HCI_MAX_EVENT_SIZE)
1279
break;
1280
skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1281
if (!skb)
1282
goto exit_on_error;
1283
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1284
skb_put_data(skb, tlv->val, pkt_len);
1285
1286
/* copy Intel specific pcie packet type */
1287
val = BTINTEL_PCIE_HCI_EVT_PKT;
1288
memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1289
BTINTEL_PCIE_HCI_TYPE_LEN);
1290
1291
print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1292
tlv->val, pkt_len, false);
1293
1294
btintel_pcie_recv_frame(data, skb);
1295
} while (offset < len);
1296
1297
exit_on_error:
1298
kfree(buf);
1299
}
1300
1301
static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1302
{
1303
bt_dev_err(data->hdev, "Received hw exception interrupt");
1304
1305
if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1306
return;
1307
1308
if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1309
return;
1310
1311
/* Trigger device core dump when there is HW exception */
1312
if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1313
data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1314
1315
queue_work(data->workqueue, &data->rx_work);
1316
}
1317
1318
static void btintel_pcie_rx_work(struct work_struct *work)
1319
{
1320
struct btintel_pcie_data *data = container_of(work,
1321
struct btintel_pcie_data, rx_work);
1322
struct sk_buff *skb;
1323
1324
if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1325
btintel_pcie_dump_traces(data->hdev);
1326
clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1327
}
1328
1329
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
1330
/* Unlike usb products, controller will not send hardware
1331
* exception event on exception. Instead controller writes the
1332
* hardware event to device memory along with optional debug
1333
* events, raises MSIX and halts. Driver shall read the
1334
* exception event from device memory and passes it stack for
1335
* further processing.
1336
*/
1337
btintel_pcie_read_hwexp(data);
1338
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1339
}
1340
1341
/* Process the sk_buf in queue and send to the HCI layer */
1342
while ((skb = skb_dequeue(&data->rx_skb_q))) {
1343
btintel_pcie_recv_frame(data, skb);
1344
}
1345
}
1346
1347
/* create sk_buff with data and save it to queue and start RX work */
1348
static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
1349
void *buf)
1350
{
1351
int ret, len;
1352
struct rfh_hdr *rfh_hdr;
1353
struct sk_buff *skb;
1354
1355
rfh_hdr = buf;
1356
1357
len = rfh_hdr->packet_len;
1358
if (len <= 0) {
1359
ret = -EINVAL;
1360
goto resubmit;
1361
}
1362
1363
/* Remove RFH header */
1364
buf += sizeof(*rfh_hdr);
1365
1366
skb = alloc_skb(len, GFP_ATOMIC);
1367
if (!skb)
1368
goto resubmit;
1369
1370
skb_put_data(skb, buf, len);
1371
skb_queue_tail(&data->rx_skb_q, skb);
1372
queue_work(data->workqueue, &data->rx_work);
1373
1374
resubmit:
1375
ret = btintel_pcie_submit_rx(data);
1376
1377
return ret;
1378
}
1379
1380
/* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1381
static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1382
{
1383
u16 cr_hia, cr_tia;
1384
struct rxq *rxq;
1385
struct urbd1 *urbd1;
1386
struct data_buf *buf;
1387
int ret;
1388
struct hci_dev *hdev = data->hdev;
1389
1390
cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1391
cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1392
1393
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1394
1395
/* Check CR_TIA and CR_HIA for change */
1396
if (cr_tia == cr_hia)
1397
return;
1398
1399
rxq = &data->rxq;
1400
1401
/* The firmware sends multiple CD in a single MSI-X and it needs to
1402
* process all received CDs in this interrupt.
1403
*/
1404
while (cr_tia != cr_hia) {
1405
urbd1 = &rxq->urbd1s[cr_tia];
1406
ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1407
1408
buf = &rxq->bufs[urbd1->frbd_tag];
1409
if (!buf) {
1410
bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1411
urbd1->frbd_tag);
1412
return;
1413
}
1414
1415
ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1416
buf->data);
1417
if (ret) {
1418
bt_dev_err(hdev, "RXQ: failed to submit rx request");
1419
return;
1420
}
1421
1422
cr_tia = (cr_tia + 1) % rxq->count;
1423
data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1424
ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1425
}
1426
}
1427
1428
static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1429
{
1430
return IRQ_WAKE_THREAD;
1431
}
1432
1433
static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1434
{
1435
return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1436
}
1437
1438
static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1439
{
1440
return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1441
}
1442
1443
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1444
{
1445
struct msix_entry *entry = dev_id;
1446
struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1447
u32 intr_fh, intr_hw;
1448
1449
spin_lock(&data->irq_lock);
1450
intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1451
intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1452
1453
/* Clear causes registers to avoid being handling the same cause */
1454
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1455
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1456
spin_unlock(&data->irq_lock);
1457
1458
if (unlikely(!(intr_fh | intr_hw))) {
1459
/* Ignore interrupt, inta == 0 */
1460
return IRQ_NONE;
1461
}
1462
1463
/* This interrupt is raised when there is an hardware exception */
1464
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1465
btintel_pcie_msix_hw_exp_handler(data);
1466
1467
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1468
btintel_pcie_msix_gp1_handler(data);
1469
1470
/* This interrupt is triggered by the firmware after updating
1471
* boot_stage register and image_response register
1472
*/
1473
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1474
btintel_pcie_msix_gp0_handler(data);
1475
1476
/* For TX */
1477
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1478
btintel_pcie_msix_tx_handle(data);
1479
if (!btintel_pcie_is_rxq_empty(data))
1480
btintel_pcie_msix_rx_handle(data);
1481
}
1482
1483
/* For RX */
1484
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1485
btintel_pcie_msix_rx_handle(data);
1486
if (!btintel_pcie_is_txackq_empty(data))
1487
btintel_pcie_msix_tx_handle(data);
1488
}
1489
1490
/*
1491
* Before sending the interrupt the HW disables it to prevent a nested
1492
* interrupt. This is done by writing 1 to the corresponding bit in
1493
* the mask register. After handling the interrupt, it should be
1494
* re-enabled by clearing this bit. This register is defined as write 1
1495
* clear (W1C) register, meaning that it's cleared by writing 1
1496
* to the bit.
1497
*/
1498
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1499
BIT(entry->entry));
1500
1501
return IRQ_HANDLED;
1502
}
1503
1504
/* This function requests the irq for MSI-X and registers the handlers per irq.
1505
* Currently, it requests only 1 irq for all interrupt causes.
1506
*/
1507
static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1508
{
1509
int err;
1510
int num_irqs, i;
1511
1512
for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1513
data->msix_entries[i].entry = i;
1514
1515
num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1516
BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1517
if (num_irqs < 0)
1518
return num_irqs;
1519
1520
data->alloc_vecs = num_irqs;
1521
data->msix_enabled = 1;
1522
data->def_irq = 0;
1523
1524
/* setup irq handler */
1525
for (i = 0; i < data->alloc_vecs; i++) {
1526
struct msix_entry *msix_entry;
1527
1528
msix_entry = &data->msix_entries[i];
1529
msix_entry->vector = pci_irq_vector(data->pdev, i);
1530
1531
err = devm_request_threaded_irq(&data->pdev->dev,
1532
msix_entry->vector,
1533
btintel_pcie_msix_isr,
1534
btintel_pcie_irq_msix_handler,
1535
IRQF_SHARED,
1536
KBUILD_MODNAME,
1537
msix_entry);
1538
if (err) {
1539
pci_free_irq_vectors(data->pdev);
1540
data->alloc_vecs = 0;
1541
return err;
1542
}
1543
}
1544
return 0;
1545
}
1546
1547
struct btintel_pcie_causes_list {
1548
u32 cause;
1549
u32 mask_reg;
1550
u8 cause_num;
1551
};
1552
1553
static struct btintel_pcie_causes_list causes_list[] = {
1554
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1555
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1556
{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1557
{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1558
};
1559
1560
/* This function configures the interrupt masks for both HW_INT_CAUSES and
1561
* FH_INT_CAUSES which are meaningful to us.
1562
*
1563
* After resetting BT function via PCIE FLR or FUNC_CTRL reset, the driver
1564
* need to call this function again to configure since the masks
1565
* are reset to 0xFFFFFFFF after reset.
1566
*/
1567
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1568
{
1569
int i;
1570
int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1571
1572
/* Set Non Auto Clear Cause */
1573
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1574
btintel_pcie_wr_reg8(data,
1575
BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1576
val);
1577
btintel_pcie_clr_reg_bits(data,
1578
causes_list[i].mask_reg,
1579
causes_list[i].cause);
1580
}
1581
1582
/* Save the initial interrupt mask */
1583
data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1584
data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1585
}
1586
1587
static int btintel_pcie_config_pcie(struct pci_dev *pdev,
1588
struct btintel_pcie_data *data)
1589
{
1590
int err;
1591
1592
err = pcim_enable_device(pdev);
1593
if (err)
1594
return err;
1595
1596
pci_set_master(pdev);
1597
1598
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1599
if (err) {
1600
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1601
if (err)
1602
return err;
1603
}
1604
1605
data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1606
if (IS_ERR(data->base_addr))
1607
return PTR_ERR(data->base_addr);
1608
1609
err = btintel_pcie_setup_irq(data);
1610
if (err)
1611
return err;
1612
1613
/* Configure MSI-X with causes list */
1614
btintel_pcie_config_msix(data);
1615
1616
return 0;
1617
}
1618
1619
static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
1620
struct ctx_info *ci)
1621
{
1622
ci->version = 0x1;
1623
ci->size = sizeof(*ci);
1624
ci->config = 0x0000;
1625
ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1626
ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1627
ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1628
ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1629
ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1630
ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1631
ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1632
ci->addr_tfdq = data->txq.tfds_p_addr;
1633
ci->num_tfdq = data->txq.count;
1634
ci->num_urbdq0 = data->txq.count;
1635
ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1636
ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1637
ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1638
ci->addr_frbdq = data->rxq.frbds_p_addr;
1639
ci->num_frbdq = data->rxq.count;
1640
ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1641
ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1642
ci->num_urbdq1 = data->rxq.count;
1643
ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1644
1645
ci->dbg_output_mode = 0x01;
1646
ci->dbgc_addr = data->dbgc.frag_p_addr;
1647
ci->dbgc_size = data->dbgc.frag_size;
1648
ci->dbg_preset = 0x00;
1649
}
1650
1651
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
1652
struct txq *txq)
1653
{
1654
/* Free data buffers first */
1655
dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1656
txq->buf_v_addr, txq->buf_p_addr);
1657
kfree(txq->bufs);
1658
}
1659
1660
static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
1661
struct txq *txq)
1662
{
1663
int i;
1664
struct data_buf *buf;
1665
1666
/* Allocate the same number of buffers as the descriptor */
1667
txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1668
if (!txq->bufs)
1669
return -ENOMEM;
1670
1671
/* Allocate full chunk of data buffer for DMA first and do indexing and
1672
* initialization next, so it can be freed easily
1673
*/
1674
txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1675
txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1676
&txq->buf_p_addr,
1677
GFP_KERNEL | __GFP_NOWARN);
1678
if (!txq->buf_v_addr) {
1679
kfree(txq->bufs);
1680
return -ENOMEM;
1681
}
1682
1683
/* Setup the allocated DMA buffer to bufs. Each data_buf should
1684
* have virtual address and physical address
1685
*/
1686
for (i = 0; i < txq->count; i++) {
1687
buf = &txq->bufs[i];
1688
buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1689
buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1690
}
1691
1692
return 0;
1693
}
1694
1695
static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
1696
struct rxq *rxq)
1697
{
1698
/* Free data buffers first */
1699
dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1700
rxq->buf_v_addr, rxq->buf_p_addr);
1701
kfree(rxq->bufs);
1702
}
1703
1704
static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
1705
struct rxq *rxq)
1706
{
1707
int i;
1708
struct data_buf *buf;
1709
1710
/* Allocate the same number of buffers as the descriptor */
1711
rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1712
if (!rxq->bufs)
1713
return -ENOMEM;
1714
1715
/* Allocate full chunk of data buffer for DMA first and do indexing and
1716
* initialization next, so it can be freed easily
1717
*/
1718
rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1719
rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1720
&rxq->buf_p_addr,
1721
GFP_KERNEL | __GFP_NOWARN);
1722
if (!rxq->buf_v_addr) {
1723
kfree(rxq->bufs);
1724
return -ENOMEM;
1725
}
1726
1727
/* Setup the allocated DMA buffer to bufs. Each data_buf should
1728
* have virtual address and physical address
1729
*/
1730
for (i = 0; i < rxq->count; i++) {
1731
buf = &rxq->bufs[i];
1732
buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1733
buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1734
}
1735
1736
return 0;
1737
}
1738
1739
static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
1740
dma_addr_t p_addr, void *v_addr,
1741
struct ia *ia)
1742
{
1743
/* TR Head Index Array */
1744
ia->tr_hia_p_addr = p_addr;
1745
ia->tr_hia = v_addr;
1746
1747
/* TR Tail Index Array */
1748
ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1749
ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1750
1751
/* CR Head index Array */
1752
ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1753
ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1754
1755
/* CR Tail Index Array */
1756
ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1757
ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1758
}
1759
1760
static void btintel_pcie_free(struct btintel_pcie_data *data)
1761
{
1762
btintel_pcie_free_rxq_bufs(data, &data->rxq);
1763
btintel_pcie_free_txq_bufs(data, &data->txq);
1764
1765
dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
1766
dma_pool_destroy(data->dma_pool);
1767
}
1768
1769
/* Allocate tx and rx queues, any related data structures and buffers.
1770
*/
1771
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index array, and
	 * context information, instead of allocating individually.
	 * The DMA memory for the data buffers is allocated while setting up
	 * each queue.
	 *
	 * Total size is the sum of the following:
	 * + size of TFD * number of descriptors in queue
	 * + size of URBD0 * number of descriptors in queue
	 * + size of FRBD * number of descriptors in queue
	 * + size of URBD1 * number of descriptors in queue
	 * + size of index * number of queues(2) * types of index array(4)
	 * + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Add the sum of size of index array and size of ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Setup tfds */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup urbd0 */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup FRBD */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup urbd1 */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup data buffers for dbgc */
	err = btintel_pcie_setup_dbgc(data);
	if (err)
		goto exit_error_txq;

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}

static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

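/* Build a synthetic HCI Command Complete event (ncmd = 1, status = 0x00) for
 * @opcode and feed it to the HCI core, since the controller does not send one
 * itself for the bootloader reset command.
 */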
static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

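/* hdev->send callback: prepend the 4-byte iBT PCIe packet type derived from
 * the 1-byte HCI packet type and hand the skb to btintel_pcie_send_sync().
 * In bootloader mode a Command Complete event is injected for
 * BTINTEL_HCI_OP_RESET because the firmware reboots without replying.
 */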
static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;

	if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
		return -ENODEV;

	/* Due to a fw limitation, the type header of the packet should be
	 * 4 bytes unlike the 1 byte used for UART. For UART, the firmware can
	 * read the first byte to get the packet type and redirect the rest of
	 * the packet to the right handler.
	 *
	 * But for PCIe, the THF (Transfer Flow Handler) fetches the 4 bytes of
	 * data from DMA memory, and by the time it reads the first 4 bytes it
	 * has already consumed part of the packet. Thus the packet type
	 * indicator for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
	 * headroom for profile and driver use, so the iBT PCIe packet type is
	 * prepended there before the data is sent to the device.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			struct hci_command_hdr *cmd = (void *)skb->data;
			__u16 opcode = le16_to_cpu(cmd->opcode);

			/* When the BTINTEL_HCI_OP_RESET command is issued to
			 * boot into the operational firmware, it will actually
			 * not send a command complete event. To keep the flow
			 * control working, inject that event here.
			 */
			if (opcode == BTINTEL_HCI_OP_RESET)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}

		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}

	ret = btintel_pcie_send_sync(data, skb, type, opcode);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}

static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}

static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
	for (int i = 0; i < data->alloc_vecs; i++)
		synchronize_irq(data->msix_entries[i].vector);
}

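/* One firmware setup pass: read the Intel version (opcode 0xfc05), apply the
 * common Intel quirks, validate the CNVi hardware platform and variant, and
 * run the TLV bootloader firmware download.
 */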
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel device */
	hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
	hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type device, parse the tlv data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e: /* BzrI */
	case 0x1f: /* ScP */
	case 0x22: /* BzrIW */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
		break;
	}

	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;

	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}

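/* hdev->setup callback: run the firmware setup and, on failure, retry once
 * after resetting the controller (shared reset, index arrays, MSI-X) so a
 * transient firmware download error does not leave the device unusable.
 */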
static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		btintel_pcie_dump_debug_registers(hdev);
		btintel_pcie_disable_interrupts(data);
		btintel_pcie_synchronize_irqs(data);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_enable_interrupts(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}

	if (!err)
		set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
	return err;
}

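/* Look up the recovery bookkeeping entry for this device by its PCI name
 * (BDF) on the global list, or allocate and register a new one.
 */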
static struct btintel_pcie_dev_recovery *
btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
	struct btintel_pcie_dev_recovery *tmp, *data = NULL;
	const char *name = pci_name(pdev);
	const size_t name_len = strlen(name) + 1;
	struct hci_dev *hdev = to_hci_dev(dev);

	spin_lock(&btintel_pcie_recovery_lock);
	list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&btintel_pcie_recovery_lock);

	if (data) {
		bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
		return data;
	}

	data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
	if (!data)
		return NULL;

	strscpy(data->name, name, name_len);
	spin_lock(&btintel_pcie_recovery_lock);
	list_add_tail(&data->list, &btintel_pcie_recovery_list);
	spin_unlock(&btintel_pcie_recovery_lock);

	return data;
}

static void btintel_pcie_free_restart_list(void)
{
	struct btintel_pcie_dev_recovery *tmp;

	while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

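/* Track recovery attempts: start a new BTINTEL_PCIE_RESET_WINDOW_SECS window
 * on the first error, count further errors inside the window up to
 * BTINTEL_PCIE_FLR_MAX_RETRY, and clear the state once the window expires.
 */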
static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
					    struct device *dev)
{
	struct btintel_pcie_dev_recovery *data;
	time64_t retry_window;

	data = btintel_pcie_get_recovery(pdev, dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;
	if (data->count == 0) {
		data->last_error = ktime_get_boottime_seconds();
		data->count++;
	} else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
		   data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
		data->count++;
	} else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
		data->last_error = 0;
		data->count = 0;
	}
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);

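/* Recovery worker: quiesce interrupts, drop the current hci_dev, reset the
 * PCI function and then re-enable the controller and register a fresh
 * hci_dev. Runs from process context under pci_lock_rescan_remove().
 */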
static void btintel_pcie_removal_work(struct work_struct *wk)
{
	struct btintel_pcie_removal *removal =
		container_of(wk, struct btintel_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	struct btintel_pcie_data *data;
	int err;

	pci_lock_rescan_remove();

	if (!pdev->bus)
		goto error;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);
	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	bt_dev_dbg(data->hdev, "Release bluetooth interface");
	btintel_pcie_release_hdev(data);

	err = pci_reset_function(pdev);
	if (err) {
		BT_ERR("Failed resetting the pcie device (%d)", err);
		goto error;
	}

	btintel_pcie_enable_interrupts(data);
	btintel_pcie_config_msix(data);

	err = btintel_pcie_enable_bt(data);
	if (err) {
		BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
		       err);
		goto error;
	}

	btintel_pcie_reset_ia(data);
	btintel_pcie_start_rx(data);
	data->flags = 0;

	err = btintel_pcie_setup_hdev(data);
	if (err) {
		BT_ERR("Failed registering hdev (%d)", err);
		goto error;
	}
error:
	pci_dev_put(pdev);
	pci_unlock_rescan_remove();
	kfree(removal);
}

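/* hdev->reset callback: defer the actual recovery to
 * btintel_pcie_removal_work; the allocation uses GFP_ATOMIC since this may be
 * invoked from a non-sleeping context, and BTINTEL_PCIE_RECOVERY_IN_PROGRESS
 * prevents queueing the work twice.
 */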
static void btintel_pcie_reset(struct hci_dev *hdev)
{
	struct btintel_pcie_removal *removal;
	struct btintel_pcie_data *data;

	data = hci_get_drvdata(hdev);

	if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
		return;

	if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
		return;

	removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
	if (!removal)
		return;

	removal->pdev = data->pdev;
	INIT_WORK(&removal->work, btintel_pcie_removal_work);
	pci_dev_get(removal->pdev);
	schedule_work(&removal->work);
}

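/* hdev->hw_error callback: error code 0x13 (top exception) is only logged.
 * For other codes, give up once BTINTEL_PCIE_FLR_MAX_RETRY recoveries have
 * already happened inside the reset window, otherwise bump the recovery count
 * and trigger a controller reset.
 */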
static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
{
	struct btintel_pcie_dev_recovery *data;
	struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
	struct pci_dev *pdev = dev_data->pdev;
	time64_t retry_window;

	if (code == 0x13) {
		bt_dev_err(hdev, "Encountered top exception");
		return;
	}

	data = btintel_pcie_get_recovery(pdev, &hdev->dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;

	if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
	    data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
		bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
			   BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
		bt_dev_dbg(hdev, "Boot time: %lld seconds",
			   ktime_get_boottime_seconds());
		bt_dev_dbg(hdev, "last error at: %lld seconds",
			   data->last_error);
		return;
	}
	btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
	btintel_pcie_reset(hdev);
}

static bool btintel_pcie_wakeup(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	return device_may_wakeup(&data->pdev->dev);
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_pcie_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;
	hdev->reset = btintel_pcie_reset;
	hdev->wakeup = btintel_pcie_wakeup;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	data->dmp_hdr.driver_name = KBUILD_MODNAME;
	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}

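/* PCI probe: allocate per-device state, set up the PCIe/MSI-X and shared DMA
 * structures, bring the controller out of reset, start the RX path and
 * register the hci_dev.
 */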
static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}

static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);

	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_DEV_COREDUMP
static void btintel_pcie_coredump(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data = pci_get_drvdata(pdev);

	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
		return;

	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
	queue_work(data->workqueue, &data->rx_work);
}
#endif

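/* Platform power management: request D3hot on suspend (D3cold otherwise)
 * through the sleep control CSR and wait for the alive (gp0) interrupt to
 * confirm the transition before letting the suspend proceed.
 */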
static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	u32 dxstate;
	int err;

	data = pci_get_drvdata(pdev);

	dxstate = (mesg.event == PM_EVENT_SUSPEND ?
		   BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);

	data->gp0_received = false;

	start = ktime_get();

	/* Refer: 6.4.11.7 -> Platform power management */
	btintel_pcie_wr_sleep_cntrl(data, dxstate);
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
	if (err == 0) {
		bt_dev_err(data->hdev,
			   "Timeout (%u ms) on alive interrupt for D3 entry",
			   BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
		return -EBUSY;
	}

	bt_dev_dbg(data->hdev,
		   "device entered into d3 state from d0 in %lld us",
		   ktime_to_us(ktime_get() - start));

	return 0;
}

static int btintel_pcie_suspend(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
}

static int btintel_pcie_hibernate(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
}

static int btintel_pcie_freeze(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
}

static int btintel_pcie_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	int err;

	data = pci_get_drvdata(pdev);
	data->gp0_received = false;

	start = ktime_get();

	/* Refer: 6.4.11.7 -> Platform power management */
	btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
	if (err == 0) {
		bt_dev_err(data->hdev,
			   "Timeout (%u ms) on alive interrupt for D0 entry",
			   BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
		return -EBUSY;
	}

	bt_dev_dbg(data->hdev,
		   "device entered into d0 state from d3 in %lld us",
		   ktime_to_us(ktime_get() - start));
	return 0;
}

static const struct dev_pm_ops btintel_pcie_pm_ops = {
	.suspend = btintel_pcie_suspend,
	.resume = btintel_pcie_resume,
	.freeze = btintel_pcie_freeze,
	.thaw = btintel_pcie_resume,
	.poweroff = btintel_pcie_hibernate,
	.restore = btintel_pcie_resume,
};

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
	.driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
	.driver.coredump = btintel_pcie_coredump
#endif
};

static int __init btintel_pcie_init(void)
{
	return pci_register_driver(&btintel_pcie_driver);
}

static void __exit btintel_pcie_exit(void)
{
	pci_unregister_driver(&btintel_pcie_driver);
	btintel_pcie_free_restart_list();
}

module_init(btintel_pcie_init);
module_exit(btintel_pcie_exit);

MODULE_AUTHOR("Tedd Ho-Jeong An <[email protected]>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");