// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Bluetooth support for Intel PCIe devices
 *
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"
#include "btintel_pcie.h"

#define VERSION "0.1"

#define BTINTEL_PCI_DEVICE(dev, subdev)	\
	.vendor = PCI_VENDOR_ID_INTEL,	\
	.device = (dev),		\
	.subvendor = PCI_ANY_ID,	\
	.subdevice = (subdev),		\
	.driver_data = 0

#define POLL_INTERVAL_US	10

/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
	{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
/* Intel Bluetooth PCIe uses a 4-byte HCI packet type instead of the 1-byte
 * BT SIG HCI packet type.
 */
#define BTINTEL_PCIE_HCI_TYPE_LEN	4
#define BTINTEL_PCIE_HCI_CMD_PKT	0x00000001
#define BTINTEL_PCIE_HCI_ACL_PKT	0x00000002
#define BTINTEL_PCIE_HCI_SCO_PKT	0x00000003
#define BTINTEL_PCIE_HCI_EVT_PKT	0x00000004

static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
				     u16 queue_num)
{
	bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
		   queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
		   ia->tr_hia[queue_num], ia->tr_tia[queue_num],
		   ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
}

static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
				   u16 index)
{
	bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
		   index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}

static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
				 u32 bits, u32 mask, int timeout_us)
{
	int t = 0;
	u32 reg;

	do {
		reg = btintel_pcie_rd_reg32(data, offset);

		if ((reg & mask) == (bits & mask))
			return t;
		udelay(POLL_INTERVAL_US);
		t += POLL_INTERVAL_US;
	} while (t < timeout_us);

	return -ETIMEDOUT;
}
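
/* Illustrative use of btintel_pcie_poll_bit() (this exact call appears in
 * btintel_pcie_enable_bt() below):
 *
 *	err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
 *				    BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
 *				    BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
 *				    BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
 *
 * On success the return value is the time spent polling, in microseconds and
 * in POLL_INTERVAL_US steps; on timeout it is -ETIMEDOUT.
 */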

static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
}
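
/* Note on the pointer arithmetic above: each msix_entry carries its own index
 * in ->entry, so (entry - queue) rewinds the pointer to msix_entries[0], and
 * container_of() then recovers the enclosing btintel_pcie_data. This lets the
 * per-vector cookie handed to the interrupt handler find the driver data
 * without a separate lookup table.
 */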

/* Set the doorbell for TXQ to notify the device that @index (actually index-1)
 * of the TFD is updated and ready to transmit.
 */
static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
{
	u32 val;

	val = index;
	val |= (BTINTEL_PCIE_TX_DB_VEC << 16);

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
}
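
/* Doorbell value layout as built above (inferred from this driver, not from a
 * published register specification): bits [15:0] carry the ring write pointer
 * and bits [31:16] the doorbell vector. For example, writing index 5 to the
 * TX queue produces val = (BTINTEL_PCIE_TX_DB_VEC << 16) | 5.
 */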

/* Copy the data to the next (@tfd_index) data buffer and update the TFD
 * (transfer descriptor) with the data length and the DMA address of the
 * data buffer.
 */
static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
				    struct sk_buff *skb)
{
	struct data_buf *buf;
	struct tfd *tfd;

	tfd = &txq->tfds[tfd_index];
	memset(tfd, 0, sizeof(*tfd));

	buf = &txq->bufs[tfd_index];

	tfd->size = skb->len;
	tfd->addr = buf->data_p_addr;

	/* Copy the outgoing data to DMA buffer */
	memcpy(buf->data, skb->data, tfd->size);
}

static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
				  struct sk_buff *skb)
{
	int ret;
	u16 tfd_index;
	struct txq *txq = &data->txq;

	tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];

	if (tfd_index > txq->count)
		return -ERANGE;

	/* Prepare for TX. It updates the TFD with the length of data and
	 * address of the DMA buffer, and copies the data to the DMA buffer.
	 */
	btintel_pcie_prepare_tx(txq, tfd_index, skb);

	tfd_index = (tfd_index + 1) % txq->count;
	data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;

	/* Arm wait event condition */
	data->tx_wait_done = false;

	/* Set the doorbell to notify the device */
	btintel_pcie_set_tx_db(data, tfd_index);

	/* Wait for the complete interrupt - URBD0 */
	ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
				 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
	if (!ret)
		return -ETIME;

	return 0;
}
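
/* Worked example of the head-index arithmetic above: assuming a ring with
 * txq->count == 16 and tr_hia == 15, the TFD in slot 15 is filled and the
 * head advances to (15 + 1) % 16 == 0, wrapping around the ring. The
 * doorbell is written with the post-increment value, i.e. one past the slot
 * just queued, which is why @index is described as "actually index-1" in the
 * doorbell comments.
 */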

/* Set the doorbell for RXQ to notify the device that @index (actually index-1)
 * is available to receive the data.
 */
static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
{
	u32 val;

	val = index;
	val |= (BTINTEL_PCIE_RX_DB_VEC << 16);

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
}

/* Update the FRBD (free buffer descriptor) with the @frbd_index and the
 * DMA address of the free buffer.
 */
static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
{
	struct data_buf *buf;
	struct frbd *frbd;

	/* Get the buffer of the FRBD for DMA */
	buf = &rxq->bufs[frbd_index];

	frbd = &rxq->frbds[frbd_index];
	memset(frbd, 0, sizeof(*frbd));

	/* Update FRBD */
	frbd->tag = frbd_index;
	frbd->addr = buf->data_p_addr;
}

static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
{
	u16 frbd_index;
	struct rxq *rxq = &data->rxq;

	frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];

	if (frbd_index > rxq->count)
		return -ERANGE;

	/* Prepare for RX submit. It updates the FRBD with the address of
	 * the DMA buffer.
	 */
	btintel_pcie_prepare_rx(rxq, frbd_index);

	frbd_index = (frbd_index + 1) % rxq->count;
	data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
	ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);

	/* Set the doorbell to notify the device */
	btintel_pcie_set_rx_db(data, frbd_index);

	return 0;
}

static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
	int i, ret;

	for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
		ret = btintel_pcie_submit_rx(data);
		if (ret)
			return ret;
	}

	return 0;
}

static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
{
	memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}

static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
{
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
			      BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
}

/* This function enables the BT function by setting the
 * BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in the
 * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waiting for the MSI-X
 * interrupt with BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
 * The host then reads the firmware version from BTINTEL_CSR_F2D_MBX and the
 * boot stage from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
 */
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
	int err;

	data->gp0_received = false;

	/* Update the DMA address of the CI struct to the CSR */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
			      data->ci_p_addr & 0xffffffff);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
			      (u64)data->ci_p_addr >> 32);

	/* Reset the cached value of the boot stage. It is updated by the
	 * MSI-X gp0 interrupt handler.
	 */
	data->boot_stage_cache = 0x0;

	/* Set MAC_INIT bit to start the primary bootloader */
	btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);

	btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
				  BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);

	/* Wait until MAC_ACCESS is granted */
	err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
				    BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
				    BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
				    BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
	if (err < 0)
		return -ENODEV;

	/* MAC is ready. Enable the BT FUNC */
	btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
				  BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
				  BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);

	btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);

	/* Wait for the interrupt the device raises after booting up to the
	 * primary bootloader.
	 */
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
	if (!err)
		return -ETIME;

	/* Check that the cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
	if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
		return -ENODEV;

	return 0;
}
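
/* Bring-up handshake performed by btintel_pcie_enable_bt(), in order:
 *
 *	1. Write the DMA address of the context information (CI) struct to
 *	   the CSR so the device knows where the rings live.
 *	2. Set MAC_INIT and poll until MAC_ACCESS_STS is granted.
 *	3. Set FUNC_ENA | FUNC_INIT to enable the BT function.
 *	4. Sleep on gp0_wait_q until the MSI-X gp0 interrupt reports a boot
 *	   stage, then require the ROM stage bit before continuing.
 */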

/* This function handles the MSI-X interrupt for the gp0 cause (bit 0 in
 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES), which is sent for the boot stage
 * and image response.
 */
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
	u32 reg;

	/* This interrupt is shared by three different causes and it is not
	 * easy to know which one fired. So, compare each register value with
	 * the cached value and update the cache before waking up the queue.
	 */
	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
	if (reg != data->boot_stage_cache)
		data->boot_stage_cache = reg;

	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
	if (reg != data->img_resp_cache)
		data->img_resp_cache = reg;

	data->gp0_received = true;

	/* If the boot stage is OP or IML, reset the IA and start RX again */
	if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
	    data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
		btintel_pcie_reset_ia(data);
		btintel_pcie_start_rx(data);
	}

	wake_up(&data->gp0_wait_q);
}

/* This function handles the MSI-X interrupt for rx queue 0, which reports
 * TX completions.
 */
static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
{
	u16 cr_tia, cr_hia;
	struct txq *txq;
	struct urbd0 *urbd0;

	cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
	cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];

	if (cr_tia == cr_hia)
		return;

	txq = &data->txq;

	while (cr_tia != cr_hia) {
		data->tx_wait_done = true;
		wake_up(&data->tx_wait_q);

		urbd0 = &txq->urbd0s[cr_tia];

		if (urbd0->tfd_index > txq->count)
			return;

		cr_tia = (cr_tia + 1) % txq->count;
		data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
		ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
	}
}

/* Process the received rx data.
 * It checks the frame header to identify the packet type, creates a new skb,
 * and hands it to the HCI layer.
 */
static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
				   struct sk_buff *skb)
{
	int ret;
	u8 pkt_type;
	u16 plen;
	u32 pcie_pkt_type;
	struct sk_buff *new_skb;
	void *pdata;
	struct hci_dev *hdev = data->hdev;

	spin_lock(&data->hci_rx_lock);

	/* The first 4 bytes indicate the Intel PCIe specific packet type */
	pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
	if (!pdata) {
		bt_dev_err(hdev, "Corrupted packet received");
		ret = -EILSEQ;
		goto exit_error;
	}

	pcie_pkt_type = get_unaligned_le32(pdata);

	switch (pcie_pkt_type) {
	case BTINTEL_PCIE_HCI_ACL_PKT:
		if (skb->len >= HCI_ACL_HDR_SIZE) {
			plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
			pkt_type = HCI_ACLDATA_PKT;
		} else {
			bt_dev_err(hdev, "ACL packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	case BTINTEL_PCIE_HCI_SCO_PKT:
		if (skb->len >= HCI_SCO_HDR_SIZE) {
			plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
			pkt_type = HCI_SCODATA_PKT;
		} else {
			bt_dev_err(hdev, "SCO packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	case BTINTEL_PCIE_HCI_EVT_PKT:
		if (skb->len >= HCI_EVENT_HDR_SIZE) {
			plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
			pkt_type = HCI_EVENT_PKT;
		} else {
			bt_dev_err(hdev, "Event packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;
	default:
		bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
			   pcie_pkt_type);
		ret = -EINVAL;
		goto exit_error;
	}

	if (skb->len < plen) {
		bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
			   pkt_type);
		ret = -EILSEQ;
		goto exit_error;
	}

	bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);

	new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
	if (!new_skb) {
		bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
			   skb->len);
		ret = -ENOMEM;
		goto exit_error;
	}

	hci_skb_pkt_type(new_skb) = pkt_type;
	skb_put_data(new_skb, skb->data, plen);
	hdev->stat.byte_rx += plen;

	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
		ret = btintel_recv_event(hdev, new_skb);
	else
		ret = hci_recv_frame(hdev, new_skb);

exit_error:
	if (ret)
		hdev->stat.err_rx++;

	spin_unlock(&data->hci_rx_lock);

	return ret;
}
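
/* Example of the 4-byte framing handled above: a Command Complete event
 * arrives from the device (illustrative byte values) as
 *
 *	04 00 00 00  0e 04 01 05 fc 00
 *	^ LE32 type  ^ HCI event header (evt 0x0e, plen 4) plus parameters
 *
 * The LE32 0x00000004 (BTINTEL_PCIE_HCI_EVT_PKT) is stripped, plen is
 * computed from the event header, and the payload is forwarded to the HCI
 * core as an HCI_EVENT_PKT.
 */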

static void btintel_pcie_rx_work(struct work_struct *work)
{
	struct btintel_pcie_data *data = container_of(work,
					struct btintel_pcie_data, rx_work);
	struct sk_buff *skb;
	int err;
	struct hci_dev *hdev = data->hdev;

	/* Process the sk_buffs in the queue and send them to the HCI layer */
	while ((skb = skb_dequeue(&data->rx_skb_q))) {
		err = btintel_pcie_recv_frame(data, skb);
		if (err)
			bt_dev_err(hdev, "Failed to send received frame: %d",
				   err);
		kfree_skb(skb);
	}
}

/* Create an sk_buff with the data, queue it, and start the RX work */
static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
				       void *buf)
{
	int ret, len;
	struct rfh_hdr *rfh_hdr;
	struct sk_buff *skb;

	rfh_hdr = buf;

	len = rfh_hdr->packet_len;
	if (len <= 0) {
		ret = -EINVAL;
		goto resubmit;
	}

	/* Remove the RFH header */
	buf += sizeof(*rfh_hdr);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		ret = -ENOMEM;
		goto resubmit;
	}

	skb_put_data(skb, buf, len);
	skb_queue_tail(&data->rx_skb_q, skb);
	queue_work(data->workqueue, &data->rx_work);

resubmit:
	ret = btintel_pcie_submit_rx(data);

	return ret;
}

/* Handles the MSI-X interrupt for rx queue 1, which carries received data */
static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
{
	u16 cr_hia, cr_tia;
	struct rxq *rxq;
	struct urbd1 *urbd1;
	struct data_buf *buf;
	int ret;
	struct hci_dev *hdev = data->hdev;

	cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
	cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];

	bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);

	/* Check CR_TIA and CR_HIA for change */
	if (cr_tia == cr_hia) {
		bt_dev_warn(hdev, "RXQ: no new CD found");
		return;
	}

	rxq = &data->rxq;

	/* The firmware may send multiple CDs for a single MSI-X interrupt,
	 * so all pending CDs have to be processed here.
	 */
	while (cr_tia != cr_hia) {
		urbd1 = &rxq->urbd1s[cr_tia];
		ipc_print_urbd1(data->hdev, urbd1, cr_tia);

		buf = &rxq->bufs[urbd1->frbd_tag];
		if (!buf) {
			bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
				   urbd1->frbd_tag);
			return;
		}

		ret = btintel_pcie_submit_rx_work(data, urbd1->status,
						  buf->data);
		if (ret) {
			bt_dev_err(hdev, "RXQ: failed to submit rx request");
			return;
		}

		cr_tia = (cr_tia + 1) % rxq->count;
		data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
		ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
	}
}

static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
	u32 intr_fh, intr_hw;

	spin_lock(&data->irq_lock);
	intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
	intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);

	/* Clear the cause registers to avoid handling the same cause again */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
	spin_unlock(&data->irq_lock);

	if (unlikely(!(intr_fh | intr_hw))) {
		/* Ignore interrupt, inta == 0 */
		return IRQ_NONE;
	}

	/* This interrupt is triggered by the firmware after updating the
	 * boot_stage register and the image_response register.
	 */
	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
		btintel_pcie_msix_gp0_handler(data);

	/* For TX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
		btintel_pcie_msix_tx_handle(data);

	/* For RX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
		btintel_pcie_msix_rx_handle(data);

	/*
	 * Before sending the interrupt the HW disables it to prevent a nested
	 * interrupt. This is done by writing 1 to the corresponding bit in
	 * the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as write 1
	 * clear (W1C) register, meaning that it's cleared by writing 1
	 * to the bit.
	 */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
			      BIT(entry->entry));

	return IRQ_HANDLED;
}
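
/* Interrupt flow summary: the hard handler (btintel_pcie_msix_isr) only
 * returns IRQ_WAKE_THREAD; all real work happens in the threaded handler
 * above, which reads the FH/HW cause registers, acknowledges them
 * (write-1-clear), dispatches the gp0/TX/RX causes, and finally re-enables
 * the auto-masked vector through BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST.
 */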

/* This function requests the irq for MSI-X and registers the handlers per irq.
 * Currently, it requests only 1 irq for all interrupt causes.
 */
static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
{
	int err;
	int num_irqs, i;

	for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
		data->msix_entries[i].entry = i;

	num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
					 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
	if (num_irqs < 0)
		return num_irqs;

	data->alloc_vecs = num_irqs;
	data->msix_enabled = 1;
	data->def_irq = 0;

	/* setup irq handler */
	for (i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		msix_entry->vector = pci_irq_vector(data->pdev, i);

		err = devm_request_threaded_irq(&data->pdev->dev,
						msix_entry->vector,
						btintel_pcie_msix_isr,
						btintel_pcie_irq_msix_handler,
						IRQF_SHARED,
						KBUILD_MODNAME,
						msix_entry);
		if (err) {
			pci_free_irq_vectors(data->pdev);
			data->alloc_vecs = 0;
			return err;
		}
	}
	return 0;
}

struct btintel_pcie_causes_list {
	u32 cause;
	u32 mask_reg;
	u8 cause_num;
};

static struct btintel_pcie_causes_list causes_list[] = {
	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x00 },
	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x01 },
	{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0,	BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK,	0x20 },
};
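
/* Each entry above maps one interrupt cause to its mask register and IVAR
 * slot. For every cause, btintel_pcie_config_msix() programs
 *
 *	IVAR(cause_num) = def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE
 *
 * (with def_irq == 0 here the routing byte is just the non-auto-clear flag)
 * and then clears the cause's bit in its mask register to unmask it.
 */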

/* This function configures the interrupt masks for both HW_INT_CAUSES and
 * FH_INT_CAUSES which are meaningful to us.
 *
 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
 * needs to call this function again because the masks are reset to
 * 0xFFFFFFFF by the reset.
 */
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
{
	int i;
	int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;

	/* Set Non Auto Clear Cause */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		btintel_pcie_wr_reg8(data,
				     BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
				     val);
		btintel_pcie_clr_reg_bits(data,
					  causes_list[i].mask_reg,
					  causes_list[i].cause);
	}

	/* Save the initial interrupt mask */
	data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
	data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
}

static int btintel_pcie_config_pcie(struct pci_dev *pdev,
				    struct btintel_pcie_data *data)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err)
		return err;

	data->base_addr = pcim_iomap_table(pdev)[0];
	if (!data->base_addr)
		return -ENODEV;

	err = btintel_pcie_setup_irq(data);
	if (err)
		return err;

	/* Configure MSI-X with causes list */
	btintel_pcie_config_msix(data);

	return 0;
}

static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
				 struct ctx_info *ci)
{
	ci->version = 0x1;
	ci->size = sizeof(*ci);
	ci->config = 0x0000;
	ci->addr_cr_hia = data->ia.cr_hia_p_addr;
	ci->addr_tr_tia = data->ia.tr_tia_p_addr;
	ci->addr_cr_tia = data->ia.cr_tia_p_addr;
	ci->addr_tr_hia = data->ia.tr_hia_p_addr;
	ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
	ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
	ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
	ci->addr_tfdq = data->txq.tfds_p_addr;
	ci->num_tfdq = data->txq.count;
	ci->num_urbdq0 = data->txq.count;
	ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
	ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
	ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
	ci->addr_frbdq = data->rxq.frbds_p_addr;
	ci->num_frbdq = data->rxq.count;
	ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
	ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
	ci->num_urbdq1 = data->rxq.count;
	ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
}

static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
{
	/* Free data buffers first */
	dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
			  txq->buf_v_addr, txq->buf_p_addr);
	kfree(txq->bufs);
}

static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
{
	int i;
	struct data_buf *buf;

	/* Allocate the same number of buffers as there are descriptors */
	txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	/* Allocate the full chunk of data buffer for DMA first and do the
	 * indexing and initialization next, so it can be freed easily.
	 */
	txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
					     txq->count * BTINTEL_PCIE_BUFFER_SIZE,
					     &txq->buf_p_addr,
					     GFP_KERNEL | __GFP_NOWARN);
	if (!txq->buf_v_addr) {
		kfree(txq->bufs);
		return -ENOMEM;
	}

	/* Setup the allocated DMA buffer to bufs. Each data_buf should
	 * have the virtual address and physical address of its slice.
	 */
	for (i = 0; i < txq->count; i++) {
		buf = &txq->bufs[i];
		buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
		buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
	}

	return 0;
}

static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
{
	/* Free data buffers first */
	dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
			  rxq->buf_v_addr, rxq->buf_p_addr);
	kfree(rxq->bufs);
}

static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
{
	int i;
	struct data_buf *buf;

	/* Allocate the same number of buffers as there are descriptors */
	rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	/* Allocate the full chunk of data buffer for DMA first and do the
	 * indexing and initialization next, so it can be freed easily.
	 */
	rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
					     rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
					     &rxq->buf_p_addr,
					     GFP_KERNEL | __GFP_NOWARN);
	if (!rxq->buf_v_addr) {
		kfree(rxq->bufs);
		return -ENOMEM;
	}

	/* Setup the allocated DMA buffer to bufs. Each data_buf should
	 * have the virtual address and physical address of its slice.
	 */
	for (i = 0; i < rxq->count; i++) {
		buf = &rxq->bufs[i];
		buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
		buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
	}

	return 0;
}

static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
				  dma_addr_t p_addr, void *v_addr,
				  struct ia *ia)
{
	/* TR Head Index Array */
	ia->tr_hia_p_addr = p_addr;
	ia->tr_hia = v_addr;

	/* TR Tail Index Array */
	ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
	ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;

	/* CR Head Index Array */
	ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
	ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);

	/* CR Tail Index Array */
	ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
	ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
}
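
/* Resulting layout of the index-array block, with N = BTINTEL_PCIE_NUM_QUEUES
 * u16 entries per array (byte offsets from p_addr):
 *
 *	0 * N	tr_hia - TR head, advanced by the host on submission
 *	2 * N	tr_tia - TR tail
 *	4 * N	cr_hia - CR head
 *	6 * N	cr_tia - CR tail, advanced by the host on completion
 *
 * As used by this driver, the host writes tr_hia and cr_tia while tr_tia and
 * cr_hia are updated by the device (an inference from the code, not from a
 * published interface spec).
 */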

static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}

/* Allocate the tx and rx queues and any related data structures and buffers.
 */
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate one chunk of DMA memory for the descriptors, index arrays,
	 * and context information, instead of allocating them individually.
	 * The DMA memory for the data buffers is allocated while setting up
	 * each queue.
	 *
	 * The total size is the sum of the following:
	 *  + size of TFD * number of descriptors in queue
	 *  + size of URBD0 * number of descriptors in queue
	 *  + size of FRBD * number of descriptors in queue
	 *  + size of URBD1 * number of descriptors in queue
	 *  + size of index * number of queues (2) * types of index array (4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
		+ sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;

	/* Add the sum of the size of the index arrays and the ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate the DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup the descriptor count */
	data->txq.count = BTINTEL_DESCS_COUNT;
	data->rxq.count = BTINTEL_DESCS_COUNT;

	/* Setup the TFDs */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);

	/* Setup the URBD0s */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);

	/* Setup the FRBDs */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);

	/* Setup the URBD1s */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);

	/* Setup the data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup the data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup the Index Arrays */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup the Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}
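
/* Layout of the single DMA pool chunk carved up above, one allocation with
 * BTINTEL_DESCS_COUNT descriptors per ring:
 *
 *	TFDs | URBD0s | FRBDs | URBD1s | index arrays (4 x 2 x u16) | ctx_info
 *
 * Keeping everything in one chunk means btintel_pcie_free() only has to
 * return one pool allocation besides the per-queue data buffers.
 */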

static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
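
/* On the wire, the event built above looks like this (the trailing 0x00 is
 * the appended status byte):
 *
 *	0e 04 01 <opcode LE16> 00
 *	evt plen ncmd
 *
 * e.g. for opcode 0xfc01 this yields 0e 04 01 01 fc 00, exactly the Command
 * Complete the controller never sends for the boot command.
 */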

static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	int ret;
	u32 type;

	/* Due to a firmware limitation, the packet type header is 4 bytes
	 * instead of the 1 byte used on UART. On UART, the firmware can read
	 * the first byte to get the packet type and redirect the rest of the
	 * packet to the right handler.
	 *
	 * But for PCIe, the THF (Transfer Flow Handler) fetches data from DMA
	 * memory 4 bytes at a time, and by the time it reads the first 4
	 * bytes it has already consumed part of the packet. Thus the packet
	 * type indicator for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
	 * headroom for profile and driver use, so the driver prepends the iBT
	 * PCIe packet type before sending the data to the device.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			struct hci_command_hdr *cmd = (void *)skb->data;
			__u16 opcode = le16_to_cpu(cmd->opcode);

			/* When the 0xfc01 command is issued to boot into
			 * the operational firmware, it will actually not
			 * send a command complete event. To keep the flow
			 * control working inject that event here.
			 */
			if (opcode == 0xfc01)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}
	memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
	       BTINTEL_PCIE_HCI_TYPE_LEN);

	ret = btintel_pcie_send_sync(data, skb);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}
	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}

static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}

static int btintel_pcie_setup(struct hci_dev *hdev)
{
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel devices */
	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
	set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type devices, parse the TLV data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e:	/* BzrI */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

		/* Apply LE States quirk from solar onwards */
		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}

static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}

static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_reset_bt(data);

	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	flush_work(&data->rx_work);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
};
module_pci_driver(btintel_pcie_driver);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");