Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[media] netup_unidvb: NetUP Universal DVB-S/S2/T/T2/C PCI-E card driver

Add NetUP Dual Universal CI PCIe board driver.
The board has
- two CI slots
- two I2C adapters
- SPI master bus for accessing flash memory containing
FPGA firmware

No changes required.

Signed-off-by: Kozlov Sergey <serjk@netup.ru>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>

Authored by Kozlov Sergey; committed by Mauro Carvalho Chehab.
52b1eaf4 c8946c8d

+2048 -1
+9
MAINTAINERS
··· 6637 6637 S: Supported 6638 6638 F: drivers/media/dvb-frontends/lnbh25* 6639 6639 6640 + MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices 6641 + M: Sergey Kozlov <serjk@netup.ru> 6642 + L: linux-media@vger.kernel.org 6643 + W: http://linuxtv.org/ 6644 + W: http://netup.tv/ 6645 + T: git git://linuxtv.org/media_tree.git 6646 + S: Supported 6647 + F: drivers/media/pci/netup_unidvb/* 6648 + 6640 6649 MEDIA INPUT INFRASTRUCTURE (V4L/DVB) 6641 6650 M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 6642 6651 P: LinuxTV.org Project
+1
drivers/media/pci/Kconfig
··· 49 49 source "drivers/media/pci/ngene/Kconfig" 50 50 source "drivers/media/pci/ddbridge/Kconfig" 51 51 source "drivers/media/pci/smipcie/Kconfig" 52 + source "drivers/media/pci/netup_unidvb/Kconfig" 52 53 endif 53 54 54 55 endif #MEDIA_PCI_SUPPORT
+2 -1
drivers/media/pci/Makefile
··· 12 12 ngene/ \ 13 13 ddbridge/ \ 14 14 saa7146/ \ 15 - smipcie/ 15 + smipcie/ \ 16 + netup_unidvb/ 16 17 17 18 obj-$(CONFIG_VIDEO_IVTV) += ivtv/ 18 19 obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+12
drivers/media/pci/netup_unidvb/Kconfig
··· 1 + config DVB_NETUP_UNIDVB 2 + tristate "NetUP Universal DVB card support" 3 + depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER 4 + select VIDEOBUF2_DVB 5 + select VIDEOBUF2_VMALLOC 6 + select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT 7 + select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT 8 + select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT 9 + select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT 10 + ---help--- 11 + Support for NetUP PCI express Universal DVB card. 12 +
+9
drivers/media/pci/netup_unidvb/Makefile
··· 1 + netup-unidvb-objs += netup_unidvb_core.o 2 + netup-unidvb-objs += netup_unidvb_i2c.o 3 + netup-unidvb-objs += netup_unidvb_ci.o 4 + netup-unidvb-objs += netup_unidvb_spi.o 5 + 6 + obj-$(CONFIG_DVB_NETUP_UNIDVB) += netup-unidvb.o 7 + 8 + ccflags-y += -Idrivers/media/dvb-core 9 + ccflags-y += -Idrivers/media/dvb-frontends
+133
drivers/media/pci/netup_unidvb/netup_unidvb.h
··· 1 + /* 2 + * netup_unidvb.h 3 + * 4 + * Data type definitions for NetUP Universal Dual DVB-CI 5 + * 6 + * Copyright (C) 2014 NetUP Inc. 7 + * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru> 8 + * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + */ 20 + 21 + #include <linux/pci.h> 22 + #include <linux/i2c.h> 23 + #include <linux/workqueue.h> 24 + #include <media/v4l2-common.h> 25 + #include <media/v4l2-device.h> 26 + #include <media/videobuf2-dvb.h> 27 + #include <dvb_ca_en50221.h> 28 + 29 + #define NETUP_UNIDVB_NAME "netup_unidvb" 30 + #define NETUP_UNIDVB_VERSION "0.0.1" 31 + #define NETUP_VENDOR_ID 0x1b55 32 + #define NETUP_PCI_DEV_REVISION 0x2 33 + 34 + /* IRQ-related regisers */ 35 + #define REG_ISR 0x4890 36 + #define REG_ISR_MASKED 0x4892 37 + #define REG_IMASK_SET 0x4894 38 + #define REG_IMASK_CLEAR 0x4896 39 + /* REG_ISR register bits */ 40 + #define NETUP_UNIDVB_IRQ_SPI (1 << 0) 41 + #define NETUP_UNIDVB_IRQ_I2C0 (1 << 1) 42 + #define NETUP_UNIDVB_IRQ_I2C1 (1 << 2) 43 + #define NETUP_UNIDVB_IRQ_FRA0 (1 << 4) 44 + #define NETUP_UNIDVB_IRQ_FRA1 (1 << 5) 45 + #define NETUP_UNIDVB_IRQ_FRB0 (1 << 6) 46 + #define NETUP_UNIDVB_IRQ_FRB1 (1 << 7) 47 + #define NETUP_UNIDVB_IRQ_DMA1 (1 << 8) 48 + #define NETUP_UNIDVB_IRQ_DMA2 (1 << 9) 49 + #define NETUP_UNIDVB_IRQ_CI (1 << 10) 50 + #define NETUP_UNIDVB_IRQ_CAM0 (1 << 11) 51 + #define NETUP_UNIDVB_IRQ_CAM1 (1 << 12) 52 + 53 + struct netup_dma { 54 + u8 num; 55 + spinlock_t 
lock; 56 + struct netup_unidvb_dev *ndev; 57 + struct netup_dma_regs *regs; 58 + u32 ring_buffer_size; 59 + u8 *addr_virt; 60 + dma_addr_t addr_phys; 61 + u64 addr_last; 62 + u32 high_addr; 63 + u32 data_offset; 64 + u32 data_size; 65 + struct list_head free_buffers; 66 + struct work_struct work; 67 + struct timer_list timeout; 68 + }; 69 + 70 + enum netup_i2c_state { 71 + STATE_DONE, 72 + STATE_WAIT, 73 + STATE_WANT_READ, 74 + STATE_WANT_WRITE, 75 + STATE_ERROR 76 + }; 77 + 78 + struct netup_i2c_regs; 79 + 80 + struct netup_i2c { 81 + spinlock_t lock; 82 + wait_queue_head_t wq; 83 + struct i2c_adapter adap; 84 + struct netup_unidvb_dev *dev; 85 + struct netup_i2c_regs *regs; 86 + struct i2c_msg *msg; 87 + enum netup_i2c_state state; 88 + u32 xmit_size; 89 + }; 90 + 91 + struct netup_ci_state { 92 + struct dvb_ca_en50221 ca; 93 + u8 __iomem *membase8_config; 94 + u8 __iomem *membase8_io; 95 + struct netup_unidvb_dev *dev; 96 + int status; 97 + int nr; 98 + }; 99 + 100 + struct netup_spi; 101 + 102 + struct netup_unidvb_dev { 103 + struct pci_dev *pci_dev; 104 + int pci_bus; 105 + int pci_slot; 106 + int pci_func; 107 + int board_num; 108 + int old_fw; 109 + u32 __iomem *lmmio0; 110 + u8 __iomem *bmmio0; 111 + u32 __iomem *lmmio1; 112 + u8 __iomem *bmmio1; 113 + u8 *dma_virt; 114 + dma_addr_t dma_phys; 115 + u32 dma_size; 116 + struct vb2_dvb_frontends frontends[2]; 117 + struct netup_i2c i2c[2]; 118 + struct workqueue_struct *wq; 119 + struct netup_dma dma[2]; 120 + struct netup_ci_state ci[2]; 121 + struct netup_spi *spi; 122 + }; 123 + 124 + int netup_i2c_register(struct netup_unidvb_dev *ndev); 125 + void netup_i2c_unregister(struct netup_unidvb_dev *ndev); 126 + irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev); 127 + irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c); 128 + irqreturn_t netup_spi_interrupt(struct netup_spi *spi); 129 + int netup_unidvb_ci_register(struct netup_unidvb_dev *dev, 130 + int num, struct pci_dev *pci_dev); 131 + void 
netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num); 132 + int netup_spi_init(struct netup_unidvb_dev *ndev); 133 + void netup_spi_release(struct netup_unidvb_dev *ndev);
+248
drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
··· 1 + /* 2 + * netup_unidvb_ci.c 3 + * 4 + * DVB CAM support for NetUP Universal Dual DVB-CI 5 + * 6 + * Copyright (C) 2014 NetUP Inc. 7 + * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru> 8 + * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + */ 20 + 21 + #include <linux/init.h> 22 + #include <linux/module.h> 23 + #include <linux/moduleparam.h> 24 + #include <linux/kmod.h> 25 + #include <linux/kernel.h> 26 + #include <linux/slab.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/delay.h> 29 + #include "netup_unidvb.h" 30 + 31 + /* CI slot 0 base address */ 32 + #define CAM0_CONFIG 0x0 33 + #define CAM0_IO 0x8000 34 + #define CAM0_MEM 0x10000 35 + #define CAM0_SZ 32 36 + /* CI slot 1 base address */ 37 + #define CAM1_CONFIG 0x20000 38 + #define CAM1_IO 0x28000 39 + #define CAM1_MEM 0x30000 40 + #define CAM1_SZ 32 41 + /* ctrlstat registers */ 42 + #define CAM_CTRLSTAT_READ_SET 0x4980 43 + #define CAM_CTRLSTAT_CLR 0x4982 44 + /* register bits */ 45 + #define BIT_CAM_STCHG (1<<0) 46 + #define BIT_CAM_PRESENT (1<<1) 47 + #define BIT_CAM_RESET (1<<2) 48 + #define BIT_CAM_BYPASS (1<<3) 49 + #define BIT_CAM_READY (1<<4) 50 + #define BIT_CAM_ERROR (1<<5) 51 + #define BIT_CAM_OVERCURR (1<<6) 52 + /* BIT_CAM_BYPASS bit shift for SLOT 1 */ 53 + #define CAM1_SHIFT 8 54 + 55 + irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev) 56 + { 57 + writew(0x101, ndev->bmmio0 + CAM_CTRLSTAT_CLR); 58 + return IRQ_HANDLED; 59 + } 
60 + 61 + static int netup_unidvb_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, 62 + int slot) 63 + { 64 + struct netup_ci_state *state = en50221->data; 65 + struct netup_unidvb_dev *dev = state->dev; 66 + u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0; 67 + 68 + dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x\n", 69 + __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET)); 70 + if (slot != 0) 71 + return -EINVAL; 72 + /* pass data to CAM module */ 73 + writew(BIT_CAM_BYPASS << shift, dev->bmmio0 + CAM_CTRLSTAT_CLR); 74 + dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x done\n", 75 + __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET)); 76 + return 0; 77 + } 78 + 79 + static int netup_unidvb_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, 80 + int slot) 81 + { 82 + struct netup_ci_state *state = en50221->data; 83 + struct netup_unidvb_dev *dev = state->dev; 84 + 85 + dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__); 86 + return 0; 87 + } 88 + 89 + static int netup_unidvb_ci_slot_reset(struct dvb_ca_en50221 *en50221, 90 + int slot) 91 + { 92 + struct netup_ci_state *state = en50221->data; 93 + struct netup_unidvb_dev *dev = state->dev; 94 + unsigned long timeout = 0; 95 + u16 shift = (state->nr == 1) ? 
CAM1_SHIFT : 0; 96 + u16 ci_stat = 0; 97 + int reset_counter = 3; 98 + 99 + dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n", 100 + __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET)); 101 + reset: 102 + timeout = jiffies + msecs_to_jiffies(5000); 103 + /* start reset */ 104 + writew(BIT_CAM_RESET << shift, dev->bmmio0 + CAM_CTRLSTAT_READ_SET); 105 + dev_dbg(&dev->pci_dev->dev, "%s(): waiting for reset\n", __func__); 106 + /* wait until reset done */ 107 + while (time_before(jiffies, timeout)) { 108 + ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET); 109 + if (ci_stat & (BIT_CAM_READY << shift)) 110 + break; 111 + udelay(1000); 112 + } 113 + if (!(ci_stat & (BIT_CAM_READY << shift)) && reset_counter > 0) { 114 + dev_dbg(&dev->pci_dev->dev, 115 + "%s(): CAMP reset timeout! Will try again..\n", 116 + __func__); 117 + reset_counter--; 118 + goto reset; 119 + } 120 + return 0; 121 + } 122 + 123 + static int netup_unidvb_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, 124 + int slot, int open) 125 + { 126 + struct netup_ci_state *state = en50221->data; 127 + struct netup_unidvb_dev *dev = state->dev; 128 + u16 shift = (state->nr == 1) ? 
CAM1_SHIFT : 0; 129 + u16 ci_stat = 0; 130 + 131 + dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n", 132 + __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET)); 133 + ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET); 134 + if (ci_stat & (BIT_CAM_READY << shift)) { 135 + state->status = DVB_CA_EN50221_POLL_CAM_PRESENT | 136 + DVB_CA_EN50221_POLL_CAM_READY; 137 + } else if (ci_stat & (BIT_CAM_PRESENT << shift)) { 138 + state->status = DVB_CA_EN50221_POLL_CAM_PRESENT; 139 + } else { 140 + state->status = 0; 141 + } 142 + return state->status; 143 + } 144 + 145 + static int netup_unidvb_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, 146 + int slot, int addr) 147 + { 148 + struct netup_ci_state *state = en50221->data; 149 + struct netup_unidvb_dev *dev = state->dev; 150 + u8 val = state->membase8_config[addr]; 151 + 152 + dev_dbg(&dev->pci_dev->dev, 153 + "%s(): addr=0x%x val=0x%x\n", __func__, addr, val); 154 + return val; 155 + } 156 + 157 + static int netup_unidvb_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221, 158 + int slot, int addr, u8 data) 159 + { 160 + struct netup_ci_state *state = en50221->data; 161 + struct netup_unidvb_dev *dev = state->dev; 162 + 163 + dev_dbg(&dev->pci_dev->dev, 164 + "%s(): addr=0x%x data=0x%x\n", __func__, addr, data); 165 + state->membase8_config[addr] = data; 166 + return 0; 167 + } 168 + 169 + static int netup_unidvb_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, 170 + int slot, u8 addr) 171 + { 172 + struct netup_ci_state *state = en50221->data; 173 + struct netup_unidvb_dev *dev = state->dev; 174 + u8 val = state->membase8_io[addr]; 175 + 176 + dev_dbg(&dev->pci_dev->dev, 177 + "%s(): addr=0x%x val=0x%x\n", __func__, addr, val); 178 + return val; 179 + } 180 + 181 + static int netup_unidvb_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, 182 + int slot, u8 addr, u8 data) 183 + { 184 + struct netup_ci_state *state = en50221->data; 185 + struct netup_unidvb_dev *dev = state->dev; 186 + 187 + 
dev_dbg(&dev->pci_dev->dev, 188 + "%s(): addr=0x%x data=0x%x\n", __func__, addr, data); 189 + state->membase8_io[addr] = data; 190 + return 0; 191 + } 192 + 193 + int netup_unidvb_ci_register(struct netup_unidvb_dev *dev, 194 + int num, struct pci_dev *pci_dev) 195 + { 196 + int result; 197 + struct netup_ci_state *state; 198 + 199 + if (num < 0 || num > 1) { 200 + dev_err(&pci_dev->dev, "%s(): invalid CI adapter %d\n", 201 + __func__, num); 202 + return -EINVAL; 203 + } 204 + state = &dev->ci[num]; 205 + state->nr = num; 206 + state->membase8_config = dev->bmmio1 + 207 + ((num == 0) ? CAM0_CONFIG : CAM1_CONFIG); 208 + state->membase8_io = dev->bmmio1 + 209 + ((num == 0) ? CAM0_IO : CAM1_IO); 210 + state->dev = dev; 211 + state->ca.owner = THIS_MODULE; 212 + state->ca.read_attribute_mem = netup_unidvb_ci_read_attribute_mem; 213 + state->ca.write_attribute_mem = netup_unidvb_ci_write_attribute_mem; 214 + state->ca.read_cam_control = netup_unidvb_ci_read_cam_ctl; 215 + state->ca.write_cam_control = netup_unidvb_ci_write_cam_ctl; 216 + state->ca.slot_reset = netup_unidvb_ci_slot_reset; 217 + state->ca.slot_shutdown = netup_unidvb_ci_slot_shutdown; 218 + state->ca.slot_ts_enable = netup_unidvb_ci_slot_ts_ctl; 219 + state->ca.poll_slot_status = netup_unidvb_poll_ci_slot_status; 220 + state->ca.data = state; 221 + result = dvb_ca_en50221_init(&dev->frontends[num].adapter, 222 + &state->ca, 0, 1); 223 + if (result < 0) { 224 + dev_err(&pci_dev->dev, 225 + "%s(): dvb_ca_en50221_init result %d\n", 226 + __func__, result); 227 + return result; 228 + } 229 + writew(NETUP_UNIDVB_IRQ_CI, (u16 *)(dev->bmmio0 + REG_IMASK_SET)); 230 + dev_info(&pci_dev->dev, 231 + "%s(): CI adapter %d init done\n", __func__, num); 232 + return 0; 233 + } 234 + 235 + void netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num) 236 + { 237 + struct netup_ci_state *state; 238 + 239 + dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__); 240 + if (num < 0 || num > 1) { 241 + 
dev_err(&dev->pci_dev->dev, "%s(): invalid CI adapter %d\n", 242 + __func__, num); 243 + return; 244 + } 245 + state = &dev->ci[num]; 246 + dvb_ca_en50221_release(&state->ca); 247 + } 248 +
+1001
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
··· 1 + /* 2 + * netup_unidvb_core.c 3 + * 4 + * Main module for NetUP Universal Dual DVB-CI 5 + * 6 + * Copyright (C) 2014 NetUP Inc. 7 + * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru> 8 + * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + */ 20 + 21 + #include <linux/init.h> 22 + #include <linux/module.h> 23 + #include <linux/moduleparam.h> 24 + #include <linux/kmod.h> 25 + #include <linux/kernel.h> 26 + #include <linux/slab.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/delay.h> 29 + #include <linux/list.h> 30 + #include <media/videobuf2-vmalloc.h> 31 + 32 + #include "netup_unidvb.h" 33 + #include "cxd2841er.h" 34 + #include "horus3a.h" 35 + #include "ascot2e.h" 36 + #include "lnbh25.h" 37 + 38 + static int spi_enable; 39 + module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); 40 + 41 + MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card"); 42 + MODULE_AUTHOR("info@netup.ru"); 43 + MODULE_VERSION(NETUP_UNIDVB_VERSION); 44 + MODULE_LICENSE("GPL"); 45 + 46 + DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 47 + 48 + /* Avalon-MM PCI-E registers */ 49 + #define AVL_PCIE_IENR 0x50 50 + #define AVL_PCIE_ISR 0x40 51 + #define AVL_IRQ_ENABLE 0x80 52 + #define AVL_IRQ_ASSERTED 0x80 53 + /* GPIO registers */ 54 + #define GPIO_REG_IO 0x4880 55 + #define GPIO_REG_IO_TOGGLE 0x4882 56 + #define GPIO_REG_IO_SET 0x4884 57 + #define GPIO_REG_IO_CLEAR 0x4886 58 + /* GPIO bits */ 59 + #define 
GPIO_FEA_RESET (1 << 0) 60 + #define GPIO_FEB_RESET (1 << 1) 61 + #define GPIO_RFA_CTL (1 << 2) 62 + #define GPIO_RFB_CTL (1 << 3) 63 + #define GPIO_FEA_TU_RESET (1 << 4) 64 + #define GPIO_FEB_TU_RESET (1 << 5) 65 + /* DMA base address */ 66 + #define NETUP_DMA0_ADDR 0x4900 67 + #define NETUP_DMA1_ADDR 0x4940 68 + /* 8 DMA blocks * 128 packets * 188 bytes*/ 69 + #define NETUP_DMA_BLOCKS_COUNT 8 70 + #define NETUP_DMA_PACKETS_COUNT 128 71 + /* DMA status bits */ 72 + #define BIT_DMA_RUN 1 73 + #define BIT_DMA_ERROR 2 74 + #define BIT_DMA_IRQ 0x200 75 + 76 + /** 77 + * struct netup_dma_regs - the map of DMA module registers 78 + * @ctrlstat_set: Control register, write to set control bits 79 + * @ctrlstat_clear: Control register, write to clear control bits 80 + * @start_addr_lo: DMA ring buffer start address, lower part 81 + * @start_addr_hi: DMA ring buffer start address, higher part 82 + * @size: DMA ring buffer size register 83 + Bits [0-7]: DMA packet size, 188 bytes 84 + Bits [16-23]: packets count in block, 128 packets 85 + Bits [24-31]: blocks count, 8 blocks 86 + * @timeout: DMA timeout in units of 8ns 87 + For example, value of 375000000 equals to 3 sec 88 + * @curr_addr_lo: Current ring buffer head address, lower part 89 + * @curr_addr_hi: Current ring buffer head address, higher part 90 + * @stat_pkt_received: Statistic register, not tested 91 + * @stat_pkt_accepted: Statistic register, not tested 92 + * @stat_pkt_overruns: Statistic register, not tested 93 + * @stat_pkt_underruns: Statistic register, not tested 94 + * @stat_fifo_overruns: Statistic register, not tested 95 + */ 96 + struct netup_dma_regs { 97 + __le32 ctrlstat_set; 98 + __le32 ctrlstat_clear; 99 + __le32 start_addr_lo; 100 + __le32 start_addr_hi; 101 + __le32 size; 102 + __le32 timeout; 103 + __le32 curr_addr_lo; 104 + __le32 curr_addr_hi; 105 + __le32 stat_pkt_received; 106 + __le32 stat_pkt_accepted; 107 + __le32 stat_pkt_overruns; 108 + __le32 stat_pkt_underruns; 109 + __le32 
stat_fifo_overruns; 110 + } __packed __aligned(1); 111 + 112 + struct netup_unidvb_buffer { 113 + struct vb2_buffer vb; 114 + struct list_head list; 115 + u32 size; 116 + }; 117 + 118 + static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc); 119 + static void netup_unidvb_queue_cleanup(struct netup_dma *dma); 120 + 121 + static struct cxd2841er_config demod_config = { 122 + .i2c_addr = 0xc8 123 + }; 124 + 125 + static struct horus3a_config horus3a_conf = { 126 + .i2c_address = 0xc0, 127 + .xtal_freq_mhz = 16, 128 + .set_tuner_callback = netup_unidvb_tuner_ctrl 129 + }; 130 + 131 + static struct ascot2e_config ascot2e_conf = { 132 + .i2c_address = 0xc2, 133 + .set_tuner_callback = netup_unidvb_tuner_ctrl 134 + }; 135 + 136 + static struct lnbh25_config lnbh25_conf = { 137 + .i2c_address = 0x10, 138 + .data2_config = LNBH25_TEN | LNBH25_EXTM 139 + }; 140 + 141 + static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc) 142 + { 143 + u8 reg, mask; 144 + struct netup_dma *dma = priv; 145 + struct netup_unidvb_dev *ndev; 146 + 147 + if (!priv) 148 + return -EINVAL; 149 + ndev = dma->ndev; 150 + dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n", 151 + __func__, dma->num, is_dvb_tc); 152 + reg = readb(ndev->bmmio0 + GPIO_REG_IO); 153 + mask = (dma->num == 0) ? 
GPIO_RFA_CTL : GPIO_RFB_CTL; 154 + if (!is_dvb_tc) 155 + reg |= mask; 156 + else 157 + reg &= ~mask; 158 + writeb(reg, ndev->bmmio0 + GPIO_REG_IO); 159 + return 0; 160 + } 161 + 162 + static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev) 163 + { 164 + u16 gpio_reg; 165 + 166 + /* enable PCI-E interrupts */ 167 + writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR); 168 + /* unreset frontends bits[0:1] */ 169 + writeb(0x00, ndev->bmmio0 + GPIO_REG_IO); 170 + msleep(100); 171 + gpio_reg = 172 + GPIO_FEA_RESET | GPIO_FEB_RESET | 173 + GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET | 174 + GPIO_RFA_CTL | GPIO_RFB_CTL; 175 + writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO); 176 + dev_dbg(&ndev->pci_dev->dev, 177 + "%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n", 178 + __func__, readl(ndev->bmmio0 + AVL_PCIE_IENR), 179 + (int)readb(ndev->bmmio0 + GPIO_REG_IO)); 180 + 181 + } 182 + 183 + static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable) 184 + { 185 + u32 irq_mask = (dma->num == 0 ? 
186 + NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2); 187 + 188 + dev_dbg(&dma->ndev->pci_dev->dev, 189 + "%s(): DMA%d enable %d\n", __func__, dma->num, enable); 190 + if (enable) { 191 + writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set); 192 + writew(irq_mask, 193 + (u16 *)(dma->ndev->bmmio0 + REG_IMASK_SET)); 194 + } else { 195 + writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear); 196 + writew(irq_mask, 197 + (u16 *)(dma->ndev->bmmio0 + REG_IMASK_CLEAR)); 198 + } 199 + } 200 + 201 + static irqreturn_t netup_dma_interrupt(struct netup_dma *dma) 202 + { 203 + u64 addr_curr; 204 + u32 size; 205 + unsigned long flags; 206 + struct device *dev = &dma->ndev->pci_dev->dev; 207 + 208 + spin_lock_irqsave(&dma->lock, flags); 209 + addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) | 210 + (u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr; 211 + /* clear IRQ */ 212 + writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear); 213 + /* sanity check */ 214 + if (addr_curr < dma->addr_phys || 215 + addr_curr > dma->addr_phys + dma->ring_buffer_size) { 216 + if (addr_curr != 0) { 217 + dev_err(dev, 218 + "%s(): addr 0x%llx not from 0x%llx:0x%llx\n", 219 + __func__, addr_curr, (u64)dma->addr_phys, 220 + (u64)(dma->addr_phys + dma->ring_buffer_size)); 221 + } 222 + goto irq_handled; 223 + } 224 + size = (addr_curr >= dma->addr_last) ? 
225 + (u32)(addr_curr - dma->addr_last) : 226 + (u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr)); 227 + if (dma->data_size != 0) { 228 + printk_ratelimited("%s(): lost interrupt, data size %d\n", 229 + __func__, dma->data_size); 230 + dma->data_size += size; 231 + } 232 + if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) { 233 + dma->data_size = size; 234 + dma->data_offset = (u32)(dma->addr_last - dma->addr_phys); 235 + } 236 + dma->addr_last = addr_curr; 237 + queue_work(dma->ndev->wq, &dma->work); 238 + irq_handled: 239 + spin_unlock_irqrestore(&dma->lock, flags); 240 + return IRQ_HANDLED; 241 + } 242 + 243 + static irqreturn_t netup_unidvb_isr(int irq, void *dev_id) 244 + { 245 + struct pci_dev *pci_dev = (struct pci_dev *)dev_id; 246 + struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev); 247 + u32 reg40, reg_isr; 248 + irqreturn_t iret = IRQ_NONE; 249 + 250 + /* disable interrupts */ 251 + writel(0, ndev->bmmio0 + AVL_PCIE_IENR); 252 + /* check IRQ source */ 253 + reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR); 254 + if ((reg40 & AVL_IRQ_ASSERTED) != 0) { 255 + /* IRQ is being signaled */ 256 + reg_isr = readw(ndev->bmmio0 + REG_ISR); 257 + if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) { 258 + iret = netup_i2c_interrupt(&ndev->i2c[0]); 259 + } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) { 260 + iret = netup_i2c_interrupt(&ndev->i2c[1]); 261 + } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) { 262 + iret = netup_spi_interrupt(ndev->spi); 263 + } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) { 264 + iret = netup_dma_interrupt(&ndev->dma[0]); 265 + } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) { 266 + iret = netup_dma_interrupt(&ndev->dma[1]); 267 + } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) { 268 + iret = netup_ci_interrupt(ndev); 269 + } else { 270 + dev_err(&pci_dev->dev, 271 + "%s(): unknown interrupt 0x%x\n", 272 + __func__, reg_isr); 273 + } 274 + } 275 + /* re-enable interrupts */ 276 + writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR); 277 + 
return iret; 278 + } 279 + 280 + static int netup_unidvb_queue_setup(struct vb2_queue *vq, 281 + const struct v4l2_format *fmt, 282 + unsigned int *nbuffers, 283 + unsigned int *nplanes, 284 + unsigned int sizes[], 285 + void *alloc_ctxs[]) 286 + { 287 + struct netup_dma *dma = vb2_get_drv_priv(vq); 288 + 289 + dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__); 290 + 291 + *nplanes = 1; 292 + if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME) 293 + *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers; 294 + sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188); 295 + dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n", 296 + __func__, *nbuffers, sizes[0]); 297 + return 0; 298 + } 299 + 300 + static int netup_unidvb_buf_prepare(struct vb2_buffer *vb) 301 + { 302 + struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 303 + struct netup_unidvb_buffer *buf = container_of(vb, 304 + struct netup_unidvb_buffer, vb); 305 + 306 + dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf); 307 + buf->size = 0; 308 + return 0; 309 + } 310 + 311 + static void netup_unidvb_buf_queue(struct vb2_buffer *vb) 312 + { 313 + unsigned long flags; 314 + struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue); 315 + struct netup_unidvb_buffer *buf = container_of(vb, 316 + struct netup_unidvb_buffer, vb); 317 + 318 + dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf); 319 + spin_lock_irqsave(&dma->lock, flags); 320 + list_add_tail(&buf->list, &dma->free_buffers); 321 + spin_unlock_irqrestore(&dma->lock, flags); 322 + mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000)); 323 + } 324 + 325 + static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count) 326 + { 327 + struct netup_dma *dma = vb2_get_drv_priv(q); 328 + 329 + dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__); 330 + netup_unidvb_dma_enable(dma, 1); 331 + return 0; 332 + } 333 + 334 + static void netup_unidvb_stop_streaming(struct vb2_queue *q) 335 + { 336 + 
struct netup_dma *dma = vb2_get_drv_priv(q); 337 + 338 + dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__); 339 + netup_unidvb_dma_enable(dma, 0); 340 + netup_unidvb_queue_cleanup(dma); 341 + } 342 + 343 + static struct vb2_ops dvb_qops = { 344 + .queue_setup = netup_unidvb_queue_setup, 345 + .buf_prepare = netup_unidvb_buf_prepare, 346 + .buf_queue = netup_unidvb_buf_queue, 347 + .start_streaming = netup_unidvb_start_streaming, 348 + .stop_streaming = netup_unidvb_stop_streaming, 349 + }; 350 + 351 + static int netup_unidvb_queue_init(struct netup_dma *dma, 352 + struct vb2_queue *vb_queue) 353 + { 354 + int res; 355 + 356 + /* Init videobuf2 queue structure */ 357 + vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 358 + vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ; 359 + vb_queue->drv_priv = dma; 360 + vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer); 361 + vb_queue->ops = &dvb_qops; 362 + vb_queue->mem_ops = &vb2_vmalloc_memops; 363 + vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 364 + res = vb2_queue_init(vb_queue); 365 + if (res != 0) { 366 + dev_err(&dma->ndev->pci_dev->dev, 367 + "%s(): vb2_queue_init failed (%d)\n", __func__, res); 368 + } 369 + return res; 370 + } 371 + 372 + static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev, 373 + int num) 374 + { 375 + struct vb2_dvb_frontend *fe0, *fe1, *fe2; 376 + 377 + if (num < 0 || num > 1) { 378 + dev_dbg(&ndev->pci_dev->dev, 379 + "%s(): unable to init DVB bus %d\n", __func__, num); 380 + return -ENODEV; 381 + } 382 + mutex_init(&ndev->frontends[num].lock); 383 + INIT_LIST_HEAD(&ndev->frontends[num].felist); 384 + if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL || 385 + vb2_dvb_alloc_frontend( 386 + &ndev->frontends[num], 2) == NULL || 387 + vb2_dvb_alloc_frontend( 388 + &ndev->frontends[num], 3) == NULL) { 389 + dev_dbg(&ndev->pci_dev->dev, 390 + "%s(): unable to to alllocate vb2_dvb_frontend\n", 391 + __func__); 392 + return -ENOMEM; 393 + } 394 
+ fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1); 395 + fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2); 396 + fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3); 397 + if (fe0 == NULL || fe1 == NULL || fe2 == NULL) { 398 + dev_dbg(&ndev->pci_dev->dev, 399 + "%s(): frontends has not been allocated\n", __func__); 400 + return -EINVAL; 401 + } 402 + netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq); 403 + netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq); 404 + netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq); 405 + fe0->dvb.name = "netup_fe0"; 406 + fe1->dvb.name = "netup_fe1"; 407 + fe2->dvb.name = "netup_fe2"; 408 + fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s, 409 + &demod_config, &ndev->i2c[num].adap); 410 + if (fe0->dvb.frontend == NULL) { 411 + dev_dbg(&ndev->pci_dev->dev, 412 + "%s(): unable to attach DVB-S/S2 frontend\n", 413 + __func__); 414 + goto frontend_detach; 415 + } 416 + horus3a_conf.set_tuner_priv = &ndev->dma[num]; 417 + if (!dvb_attach(horus3a_attach, fe0->dvb.frontend, 418 + &horus3a_conf, &ndev->i2c[num].adap)) { 419 + dev_dbg(&ndev->pci_dev->dev, 420 + "%s(): unable to attach DVB-S/S2 tuner frontend\n", 421 + __func__); 422 + goto frontend_detach; 423 + } 424 + if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend, 425 + &lnbh25_conf, &ndev->i2c[num].adap)) { 426 + dev_dbg(&ndev->pci_dev->dev, 427 + "%s(): unable to attach SEC frontend\n", __func__); 428 + goto frontend_detach; 429 + } 430 + /* DVB-T/T2 frontend */ 431 + fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t, 432 + &demod_config, &ndev->i2c[num].adap); 433 + if (fe1->dvb.frontend == NULL) { 434 + dev_dbg(&ndev->pci_dev->dev, 435 + "%s(): unable to attach DVB-T frontend\n", __func__); 436 + goto frontend_detach; 437 + } 438 + fe1->dvb.frontend->id = 1; 439 + ascot2e_conf.set_tuner_priv = &ndev->dma[num]; 440 + if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend, 441 + &ascot2e_conf, &ndev->i2c[num].adap)) { 442 + dev_dbg(&ndev->pci_dev->dev, 443 + 
"%s(): unable to attach DVB-T tuner frontend\n", 444 + __func__); 445 + goto frontend_detach; 446 + } 447 + /* DVB-C/C2 frontend */ 448 + fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c, 449 + &demod_config, &ndev->i2c[num].adap); 450 + if (fe2->dvb.frontend == NULL) { 451 + dev_dbg(&ndev->pci_dev->dev, 452 + "%s(): unable to attach DVB-C frontend\n", __func__); 453 + goto frontend_detach; 454 + } 455 + fe2->dvb.frontend->id = 2; 456 + if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend, 457 + &ascot2e_conf, &ndev->i2c[num].adap)) { 458 + dev_dbg(&ndev->pci_dev->dev, 459 + "%s(): unable to attach DVB-T/C tuner frontend\n", 460 + __func__); 461 + goto frontend_detach; 462 + } 463 + 464 + if (vb2_dvb_register_bus(&ndev->frontends[num], 465 + THIS_MODULE, NULL, 466 + &ndev->pci_dev->dev, adapter_nr, 1)) { 467 + dev_dbg(&ndev->pci_dev->dev, 468 + "%s(): unable to register DVB bus %d\n", 469 + __func__, num); 470 + goto frontend_detach; 471 + } 472 + dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num); 473 + return 0; 474 + frontend_detach: 475 + vb2_dvb_dealloc_frontends(&ndev->frontends[num]); 476 + return -EINVAL; 477 + } 478 + 479 + static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num) 480 + { 481 + if (num < 0 || num > 1) { 482 + dev_err(&ndev->pci_dev->dev, 483 + "%s(): unable to unregister DVB bus %d\n", 484 + __func__, num); 485 + return; 486 + } 487 + vb2_dvb_unregister_bus(&ndev->frontends[num]); 488 + dev_info(&ndev->pci_dev->dev, 489 + "%s(): DVB bus %d unregistered\n", __func__, num); 490 + } 491 + 492 + static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev) 493 + { 494 + int res; 495 + 496 + res = netup_unidvb_dvb_init(ndev, 0); 497 + if (res) 498 + return res; 499 + res = netup_unidvb_dvb_init(ndev, 1); 500 + if (res) { 501 + netup_unidvb_dvb_fini(ndev, 0); 502 + return res; 503 + } 504 + return 0; 505 + } 506 + 507 + static int netup_unidvb_ring_copy(struct netup_dma *dma, 508 + struct netup_unidvb_buffer *buf) 509 
+ { 510 + u32 copy_bytes, ring_bytes; 511 + u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size; 512 + u8 *p = vb2_plane_vaddr(&buf->vb, 0); 513 + struct netup_unidvb_dev *ndev = dma->ndev; 514 + 515 + if (p == NULL) { 516 + dev_err(&ndev->pci_dev->dev, 517 + "%s(): buffer is NULL\n", __func__); 518 + return -EINVAL; 519 + } 520 + p += buf->size; 521 + if (dma->data_offset + dma->data_size > dma->ring_buffer_size) { 522 + ring_bytes = dma->ring_buffer_size - dma->data_offset; 523 + copy_bytes = (ring_bytes > buff_bytes) ? 524 + buff_bytes : ring_bytes; 525 + memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes); 526 + p += copy_bytes; 527 + buf->size += copy_bytes; 528 + buff_bytes -= copy_bytes; 529 + dma->data_size -= copy_bytes; 530 + dma->data_offset += copy_bytes; 531 + if (dma->data_offset == dma->ring_buffer_size) 532 + dma->data_offset = 0; 533 + } 534 + if (buff_bytes > 0) { 535 + ring_bytes = dma->data_size; 536 + copy_bytes = (ring_bytes > buff_bytes) ? 537 + buff_bytes : ring_bytes; 538 + memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes); 539 + buf->size += copy_bytes; 540 + dma->data_size -= copy_bytes; 541 + dma->data_offset += copy_bytes; 542 + if (dma->data_offset == dma->ring_buffer_size) 543 + dma->data_offset = 0; 544 + } 545 + return 0; 546 + } 547 + 548 + static void netup_unidvb_dma_worker(struct work_struct *work) 549 + { 550 + struct netup_dma *dma = container_of(work, struct netup_dma, work); 551 + struct netup_unidvb_dev *ndev = dma->ndev; 552 + struct netup_unidvb_buffer *buf; 553 + unsigned long flags; 554 + 555 + spin_lock_irqsave(&dma->lock, flags); 556 + if (dma->data_size == 0) { 557 + dev_dbg(&ndev->pci_dev->dev, 558 + "%s(): data_size == 0\n", __func__); 559 + goto work_done; 560 + } 561 + while (dma->data_size > 0) { 562 + if (list_empty(&dma->free_buffers)) { 563 + dev_dbg(&ndev->pci_dev->dev, 564 + "%s(): no free buffers\n", __func__); 565 + goto work_done; 566 + } 567 + buf = 
list_first_entry(&dma->free_buffers, 568 + struct netup_unidvb_buffer, list); 569 + if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) { 570 + dev_dbg(&ndev->pci_dev->dev, 571 + "%s(): buffer overflow, size %d\n", 572 + __func__, buf->size); 573 + goto work_done; 574 + } 575 + if (netup_unidvb_ring_copy(dma, buf)) 576 + goto work_done; 577 + if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) { 578 + list_del(&buf->list); 579 + dev_dbg(&ndev->pci_dev->dev, 580 + "%s(): buffer %p done, size %d\n", 581 + __func__, buf, buf->size); 582 + v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 583 + vb2_set_plane_payload(&buf->vb, 0, buf->size); 584 + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 585 + } 586 + } 587 + work_done: 588 + dma->data_size = 0; 589 + spin_unlock_irqrestore(&dma->lock, flags); 590 + } 591 + 592 + static void netup_unidvb_queue_cleanup(struct netup_dma *dma) 593 + { 594 + struct netup_unidvb_buffer *buf; 595 + unsigned long flags; 596 + 597 + spin_lock_irqsave(&dma->lock, flags); 598 + while (!list_empty(&dma->free_buffers)) { 599 + buf = list_first_entry(&dma->free_buffers, 600 + struct netup_unidvb_buffer, list); 601 + list_del(&buf->list); 602 + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 603 + } 604 + spin_unlock_irqrestore(&dma->lock, flags); 605 + } 606 + 607 + static void netup_unidvb_dma_timeout(unsigned long data) 608 + { 609 + struct netup_dma *dma = (struct netup_dma *)data; 610 + struct netup_unidvb_dev *ndev = dma->ndev; 611 + 612 + dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__); 613 + netup_unidvb_queue_cleanup(dma); 614 + } 615 + 616 + static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num) 617 + { 618 + struct netup_dma *dma; 619 + struct device *dev = &ndev->pci_dev->dev; 620 + 621 + if (num < 0 || num > 1) { 622 + dev_err(dev, "%s(): unable to register DMA%d\n", 623 + __func__, num); 624 + return -ENODEV; 625 + } 626 + dma = &ndev->dma[num]; 627 + dev_info(dev, "%s(): starting DMA%d\n", __func__, num); 628 + dma->num 
= num; 629 + dma->ndev = ndev; 630 + spin_lock_init(&dma->lock); 631 + INIT_WORK(&dma->work, netup_unidvb_dma_worker); 632 + INIT_LIST_HEAD(&dma->free_buffers); 633 + dma->timeout.function = netup_unidvb_dma_timeout; 634 + dma->timeout.data = (unsigned long)dma; 635 + init_timer(&dma->timeout); 636 + dma->ring_buffer_size = ndev->dma_size / 2; 637 + dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num; 638 + dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys + 639 + dma->ring_buffer_size * num); 640 + dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n", 641 + __func__, num, dma->addr_virt, 642 + (unsigned long long)dma->addr_phys, 643 + dma->ring_buffer_size); 644 + memset_io(dma->addr_virt, 0, dma->ring_buffer_size); 645 + dma->addr_last = dma->addr_phys; 646 + dma->high_addr = (u32)(dma->addr_phys & 0xC0000000); 647 + dma->regs = (struct netup_dma_regs *)(num == 0 ? 648 + ndev->bmmio0 + NETUP_DMA0_ADDR : 649 + ndev->bmmio0 + NETUP_DMA1_ADDR); 650 + writel((NETUP_DMA_BLOCKS_COUNT << 24) | 651 + (NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size); 652 + writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo); 653 + writel(0, &dma->regs->start_addr_hi); 654 + writel(dma->high_addr, ndev->bmmio0 + 0x1000); 655 + writel(375000000, &dma->regs->timeout); 656 + msleep(1000); 657 + writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear); 658 + return 0; 659 + } 660 + 661 + static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num) 662 + { 663 + struct netup_dma *dma; 664 + 665 + if (num < 0 || num > 1) 666 + return; 667 + dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num); 668 + dma = &ndev->dma[num]; 669 + netup_unidvb_dma_enable(dma, 0); 670 + msleep(50); 671 + cancel_work_sync(&dma->work); 672 + del_timer(&dma->timeout); 673 + } 674 + 675 + static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev) 676 + { 677 + int res; 678 + 679 + res = netup_unidvb_dma_init(ndev, 0); 680 + if (res) 681 + return res; 
682 + res = netup_unidvb_dma_init(ndev, 1); 683 + if (res) { 684 + netup_unidvb_dma_fini(ndev, 0); 685 + return res; 686 + } 687 + netup_unidvb_dma_enable(&ndev->dma[0], 0); 688 + netup_unidvb_dma_enable(&ndev->dma[1], 0); 689 + return 0; 690 + } 691 + 692 + static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev, 693 + struct pci_dev *pci_dev) 694 + { 695 + int res; 696 + 697 + writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET); 698 + res = netup_unidvb_ci_register(ndev, 0, pci_dev); 699 + if (res) 700 + return res; 701 + res = netup_unidvb_ci_register(ndev, 1, pci_dev); 702 + if (res) 703 + netup_unidvb_ci_unregister(ndev, 0); 704 + return res; 705 + } 706 + 707 + static int netup_unidvb_request_mmio(struct pci_dev *pci_dev) 708 + { 709 + if (!request_mem_region(pci_resource_start(pci_dev, 0), 710 + pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) { 711 + dev_err(&pci_dev->dev, 712 + "%s(): unable to request MMIO bar 0 at 0x%llx\n", 713 + __func__, 714 + (unsigned long long)pci_resource_start(pci_dev, 0)); 715 + return -EBUSY; 716 + } 717 + if (!request_mem_region(pci_resource_start(pci_dev, 1), 718 + pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) { 719 + dev_err(&pci_dev->dev, 720 + "%s(): unable to request MMIO bar 1 at 0x%llx\n", 721 + __func__, 722 + (unsigned long long)pci_resource_start(pci_dev, 1)); 723 + release_mem_region(pci_resource_start(pci_dev, 0), 724 + pci_resource_len(pci_dev, 0)); 725 + return -EBUSY; 726 + } 727 + return 0; 728 + } 729 + 730 + static int netup_unidvb_request_modules(struct device *dev) 731 + { 732 + static const char * const modules[] = { 733 + "lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL 734 + }; 735 + const char * const *curr_mod = modules; 736 + int err; 737 + 738 + while (*curr_mod != NULL) { 739 + err = request_module(*curr_mod); 740 + if (err) { 741 + dev_warn(dev, "request_module(%s) failed: %d\n", 742 + *curr_mod, err); 743 + } 744 + ++curr_mod; 745 + } 746 + return 0; 747 + } 748 + 749 + static 
int netup_unidvb_initdev(struct pci_dev *pci_dev, 750 + const struct pci_device_id *pci_id) 751 + { 752 + u8 board_revision; 753 + u16 board_vendor; 754 + struct netup_unidvb_dev *ndev; 755 + int old_firmware = 0; 756 + 757 + netup_unidvb_request_modules(&pci_dev->dev); 758 + 759 + /* Check card revision */ 760 + if (pci_dev->revision != NETUP_PCI_DEV_REVISION) { 761 + dev_err(&pci_dev->dev, 762 + "netup_unidvb: expected card revision %d, got %d\n", 763 + NETUP_PCI_DEV_REVISION, pci_dev->revision); 764 + dev_err(&pci_dev->dev, 765 + "Please upgrade firmware!\n"); 766 + dev_err(&pci_dev->dev, 767 + "Instructions on http://www.netup.tv\n"); 768 + old_firmware = 1; 769 + spi_enable = 1; 770 + } 771 + 772 + /* allocate device context */ 773 + ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); 774 + 775 + if (!ndev) 776 + goto dev_alloc_err; 777 + memset(ndev, 0, sizeof(*ndev)); 778 + ndev->old_fw = old_firmware; 779 + ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME); 780 + if (!ndev->wq) { 781 + dev_err(&pci_dev->dev, 782 + "%s(): unable to create workqueue\n", __func__); 783 + goto wq_create_err; 784 + } 785 + ndev->pci_dev = pci_dev; 786 + ndev->pci_bus = pci_dev->bus->number; 787 + ndev->pci_slot = PCI_SLOT(pci_dev->devfn); 788 + ndev->pci_func = PCI_FUNC(pci_dev->devfn); 789 + ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot; 790 + pci_set_drvdata(pci_dev, ndev); 791 + /* PCI init */ 792 + dev_info(&pci_dev->dev, "%s(): PCI device (%d). 
Bus:0x%x Slot:0x%x\n", 793 + __func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot); 794 + 795 + if (pci_enable_device(pci_dev)) { 796 + dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n", 797 + __func__); 798 + goto pci_enable_err; 799 + } 800 + /* read PCI info */ 801 + pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision); 802 + pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor); 803 + if (board_vendor != NETUP_VENDOR_ID) { 804 + dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x", 805 + __func__, board_vendor); 806 + goto pci_detect_err; 807 + } 808 + dev_info(&pci_dev->dev, 809 + "%s(): board vendor 0x%x, revision 0x%x\n", 810 + __func__, board_vendor, board_revision); 811 + pci_set_master(pci_dev); 812 + if (!pci_dma_supported(pci_dev, 0xffffffff)) { 813 + dev_err(&pci_dev->dev, 814 + "%s(): 32bit PCI DMA is not supported\n", __func__); 815 + goto pci_detect_err; 816 + } 817 + dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__); 818 + /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ 819 + pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL, 820 + PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 821 + PCI_EXP_DEVCTL_NOSNOOP_EN, 0); 822 + /* Adjust PCIe completion timeout. 
*/ 823 + pcie_capability_clear_and_set_word(pci_dev, 824 + PCI_EXP_DEVCTL2, 0xf, 0x2); 825 + 826 + if (netup_unidvb_request_mmio(pci_dev)) { 827 + dev_err(&pci_dev->dev, 828 + "%s(): unable to request MMIO regions\n", __func__); 829 + goto pci_detect_err; 830 + } 831 + ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0), 832 + pci_resource_len(pci_dev, 0)); 833 + if (!ndev->lmmio0) { 834 + dev_err(&pci_dev->dev, 835 + "%s(): unable to remap MMIO bar 0\n", __func__); 836 + goto pci_bar0_error; 837 + } 838 + ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1), 839 + pci_resource_len(pci_dev, 1)); 840 + if (!ndev->lmmio1) { 841 + dev_err(&pci_dev->dev, 842 + "%s(): unable to remap MMIO bar 1\n", __func__); 843 + goto pci_bar1_error; 844 + } 845 + ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0; 846 + ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1; 847 + dev_info(&pci_dev->dev, 848 + "%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d", 849 + __func__, 850 + ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0), 851 + ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1), 852 + pci_dev->irq); 853 + if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED, 854 + "netup_unidvb", pci_dev) < 0) { 855 + dev_err(&pci_dev->dev, 856 + "%s(): can't get IRQ %d\n", __func__, pci_dev->irq); 857 + goto irq_request_err; 858 + } 859 + ndev->dma_size = 2 * 188 * 860 + NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT; 861 + ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev, 862 + ndev->dma_size, &ndev->dma_phys, GFP_KERNEL); 863 + if (!ndev->dma_virt) { 864 + dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n", 865 + __func__); 866 + goto dma_alloc_err; 867 + } 868 + netup_unidvb_dev_enable(ndev); 869 + if (spi_enable && netup_spi_init(ndev)) { 870 + dev_warn(&pci_dev->dev, 871 + "netup_unidvb: SPI flash setup failed\n"); 872 + goto spi_setup_err; 873 + } 874 + if (old_firmware) { 875 + dev_err(&pci_dev->dev, 876 + "netup_unidvb: card initialization was incomplete\n"); 877 + return 0; 878 + } 
879 + if (netup_i2c_register(ndev)) { 880 + dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n"); 881 + goto i2c_setup_err; 882 + } 883 + /* enable I2C IRQs */ 884 + writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1, 885 + ndev->bmmio0 + REG_IMASK_SET); 886 + usleep_range(5000, 10000); 887 + if (netup_unidvb_dvb_setup(ndev)) { 888 + dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n"); 889 + goto dvb_setup_err; 890 + } 891 + if (netup_unidvb_ci_setup(ndev, pci_dev)) { 892 + dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n"); 893 + goto ci_setup_err; 894 + } 895 + if (netup_unidvb_dma_setup(ndev)) { 896 + dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n"); 897 + goto dma_setup_err; 898 + } 899 + dev_info(&pci_dev->dev, 900 + "netup_unidvb: device has been initialized\n"); 901 + return 0; 902 + dma_setup_err: 903 + netup_unidvb_ci_unregister(ndev, 0); 904 + netup_unidvb_ci_unregister(ndev, 1); 905 + ci_setup_err: 906 + netup_unidvb_dvb_fini(ndev, 0); 907 + netup_unidvb_dvb_fini(ndev, 1); 908 + dvb_setup_err: 909 + netup_i2c_unregister(ndev); 910 + i2c_setup_err: 911 + if (ndev->spi) 912 + netup_spi_release(ndev); 913 + spi_setup_err: 914 + dma_free_coherent(&pci_dev->dev, ndev->dma_size, 915 + ndev->dma_virt, ndev->dma_phys); 916 + dma_alloc_err: 917 + free_irq(pci_dev->irq, pci_dev); 918 + irq_request_err: 919 + iounmap(ndev->lmmio1); 920 + pci_bar1_error: 921 + iounmap(ndev->lmmio0); 922 + pci_bar0_error: 923 + release_mem_region(pci_resource_start(pci_dev, 0), 924 + pci_resource_len(pci_dev, 0)); 925 + release_mem_region(pci_resource_start(pci_dev, 1), 926 + pci_resource_len(pci_dev, 1)); 927 + pci_detect_err: 928 + pci_disable_device(pci_dev); 929 + pci_enable_err: 930 + pci_set_drvdata(pci_dev, NULL); 931 + destroy_workqueue(ndev->wq); 932 + wq_create_err: 933 + kfree(ndev); 934 + dev_alloc_err: 935 + dev_err(&pci_dev->dev, 936 + "%s(): failed to initizalize device\n", __func__); 937 + return -EIO; 938 + } 939 + 940 + static void 
netup_unidvb_finidev(struct pci_dev *pci_dev) 941 + { 942 + struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev); 943 + 944 + dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__); 945 + if (!ndev->old_fw) { 946 + netup_unidvb_dma_fini(ndev, 0); 947 + netup_unidvb_dma_fini(ndev, 1); 948 + netup_unidvb_ci_unregister(ndev, 0); 949 + netup_unidvb_ci_unregister(ndev, 1); 950 + netup_unidvb_dvb_fini(ndev, 0); 951 + netup_unidvb_dvb_fini(ndev, 1); 952 + netup_i2c_unregister(ndev); 953 + } 954 + if (ndev->spi) 955 + netup_spi_release(ndev); 956 + writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR); 957 + dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size, 958 + ndev->dma_virt, ndev->dma_phys); 959 + free_irq(pci_dev->irq, pci_dev); 960 + iounmap(ndev->lmmio0); 961 + iounmap(ndev->lmmio1); 962 + release_mem_region(pci_resource_start(pci_dev, 0), 963 + pci_resource_len(pci_dev, 0)); 964 + release_mem_region(pci_resource_start(pci_dev, 1), 965 + pci_resource_len(pci_dev, 1)); 966 + pci_disable_device(pci_dev); 967 + pci_set_drvdata(pci_dev, NULL); 968 + destroy_workqueue(ndev->wq); 969 + kfree(ndev); 970 + dev_info(&pci_dev->dev, 971 + "%s(): device has been successfully stopped\n", __func__); 972 + } 973 + 974 + 975 + static struct pci_device_id netup_unidvb_pci_tbl[] = { 976 + { PCI_DEVICE(0x1b55, 0x18f6) }, 977 + { 0, } 978 + }; 979 + MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl); 980 + 981 + static struct pci_driver netup_unidvb_pci_driver = { 982 + .name = "netup_unidvb", 983 + .id_table = netup_unidvb_pci_tbl, 984 + .probe = netup_unidvb_initdev, 985 + .remove = netup_unidvb_finidev, 986 + .suspend = NULL, 987 + .resume = NULL, 988 + }; 989 + 990 + static int __init netup_unidvb_init(void) 991 + { 992 + return pci_register_driver(&netup_unidvb_pci_driver); 993 + } 994 + 995 + static void __exit netup_unidvb_fini(void) 996 + { 997 + pci_unregister_driver(&netup_unidvb_pci_driver); 998 + } 999 + 1000 + module_init(netup_unidvb_init); 1001 + 
module_exit(netup_unidvb_fini);
+381
drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
··· 1 + /* 2 + * netup_unidvb_i2c.c 3 + * 4 + * Internal I2C bus driver for NetUP Universal Dual DVB-CI 5 + * 6 + * Copyright (C) 2014 NetUP Inc. 7 + * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru> 8 + * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + */ 20 + 21 + #include <linux/module.h> 22 + #include <linux/moduleparam.h> 23 + #include <linux/init.h> 24 + #include <linux/delay.h> 25 + #include "netup_unidvb.h" 26 + 27 + #define NETUP_I2C_BUS0_ADDR 0x4800 28 + #define NETUP_I2C_BUS1_ADDR 0x4840 29 + #define NETUP_I2C_TIMEOUT 1000 30 + 31 + /* twi_ctrl0_stat reg bits */ 32 + #define TWI_IRQEN_COMPL 0x1 33 + #define TWI_IRQEN_ANACK 0x2 34 + #define TWI_IRQEN_DNACK 0x4 35 + #define TWI_IRQ_COMPL (TWI_IRQEN_COMPL << 8) 36 + #define TWI_IRQ_ANACK (TWI_IRQEN_ANACK << 8) 37 + #define TWI_IRQ_DNACK (TWI_IRQEN_DNACK << 8) 38 + #define TWI_IRQ_TX 0x800 39 + #define TWI_IRQ_RX 0x1000 40 + #define TWI_IRQEN (TWI_IRQEN_COMPL | TWI_IRQEN_ANACK | TWI_IRQEN_DNACK) 41 + /* twi_addr_ctrl1 reg bits*/ 42 + #define TWI_TRANSFER 0x100 43 + #define TWI_NOSTOP 0x200 44 + #define TWI_SOFT_RESET 0x2000 45 + /* twi_clkdiv reg value */ 46 + #define TWI_CLKDIV 156 47 + /* fifo_stat_ctrl reg bits */ 48 + #define FIFO_IRQEN 0x8000 49 + #define FIFO_RESET 0x4000 50 + /* FIFO size */ 51 + #define FIFO_SIZE 16 52 + 53 + struct netup_i2c_fifo_regs { 54 + union { 55 + __u8 data8; 56 + __le16 data16; 57 + __le32 data32; 58 + }; 59 + __u8 padding[4]; 60 + 
__le16 stat_ctrl; 61 + } __packed __aligned(1); 62 + 63 + struct netup_i2c_regs { 64 + __le16 clkdiv; 65 + __le16 twi_ctrl0_stat; 66 + __le16 twi_addr_ctrl1; 67 + __le16 length; 68 + __u8 padding1[8]; 69 + struct netup_i2c_fifo_regs tx_fifo; 70 + __u8 padding2[6]; 71 + struct netup_i2c_fifo_regs rx_fifo; 72 + } __packed __aligned(1); 73 + 74 + irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c) 75 + { 76 + u16 reg, tmp; 77 + unsigned long flags; 78 + irqreturn_t iret = IRQ_HANDLED; 79 + 80 + spin_lock_irqsave(&i2c->lock, flags); 81 + reg = readw(&i2c->regs->twi_ctrl0_stat); 82 + writew(reg & ~TWI_IRQEN, &i2c->regs->twi_ctrl0_stat); 83 + dev_dbg(i2c->adap.dev.parent, 84 + "%s(): twi_ctrl0_state 0x%x\n", __func__, reg); 85 + if ((reg & TWI_IRQEN_COMPL) != 0 && (reg & TWI_IRQ_COMPL)) { 86 + dev_dbg(i2c->adap.dev.parent, 87 + "%s(): TWI_IRQEN_COMPL\n", __func__); 88 + i2c->state = STATE_DONE; 89 + goto irq_ok; 90 + } 91 + if ((reg & TWI_IRQEN_ANACK) != 0 && (reg & TWI_IRQ_ANACK)) { 92 + dev_dbg(i2c->adap.dev.parent, 93 + "%s(): TWI_IRQEN_ANACK\n", __func__); 94 + i2c->state = STATE_ERROR; 95 + goto irq_ok; 96 + } 97 + if ((reg & TWI_IRQEN_DNACK) != 0 && (reg & TWI_IRQ_DNACK)) { 98 + dev_dbg(i2c->adap.dev.parent, 99 + "%s(): TWI_IRQEN_DNACK\n", __func__); 100 + i2c->state = STATE_ERROR; 101 + goto irq_ok; 102 + } 103 + if ((reg & TWI_IRQ_RX) != 0) { 104 + tmp = readw(&i2c->regs->rx_fifo.stat_ctrl); 105 + writew(tmp & ~FIFO_IRQEN, &i2c->regs->rx_fifo.stat_ctrl); 106 + i2c->state = STATE_WANT_READ; 107 + dev_dbg(i2c->adap.dev.parent, 108 + "%s(): want read\n", __func__); 109 + goto irq_ok; 110 + } 111 + if ((reg & TWI_IRQ_TX) != 0) { 112 + tmp = readw(&i2c->regs->tx_fifo.stat_ctrl); 113 + writew(tmp & ~FIFO_IRQEN, &i2c->regs->tx_fifo.stat_ctrl); 114 + i2c->state = STATE_WANT_WRITE; 115 + dev_dbg(i2c->adap.dev.parent, 116 + "%s(): want write\n", __func__); 117 + goto irq_ok; 118 + } 119 + dev_warn(&i2c->adap.dev, "%s(): not mine interrupt\n", __func__); 120 + iret = 
IRQ_NONE; 121 + irq_ok: 122 + spin_unlock_irqrestore(&i2c->lock, flags); 123 + if (iret == IRQ_HANDLED) 124 + wake_up(&i2c->wq); 125 + return iret; 126 + } 127 + 128 + static void netup_i2c_reset(struct netup_i2c *i2c) 129 + { 130 + dev_dbg(i2c->adap.dev.parent, "%s()\n", __func__); 131 + i2c->state = STATE_DONE; 132 + writew(TWI_SOFT_RESET, &i2c->regs->twi_addr_ctrl1); 133 + writew(TWI_CLKDIV, &i2c->regs->clkdiv); 134 + writew(FIFO_RESET, &i2c->regs->tx_fifo.stat_ctrl); 135 + writew(FIFO_RESET, &i2c->regs->rx_fifo.stat_ctrl); 136 + writew(0x800, &i2c->regs->tx_fifo.stat_ctrl); 137 + writew(0x800, &i2c->regs->rx_fifo.stat_ctrl); 138 + } 139 + 140 + static void netup_i2c_fifo_tx(struct netup_i2c *i2c) 141 + { 142 + u8 data; 143 + u32 fifo_space = FIFO_SIZE - 144 + (readw(&i2c->regs->tx_fifo.stat_ctrl) & 0x3f); 145 + u32 msg_length = i2c->msg->len - i2c->xmit_size; 146 + 147 + msg_length = (msg_length < fifo_space ? msg_length : fifo_space); 148 + while (msg_length--) { 149 + data = i2c->msg->buf[i2c->xmit_size++]; 150 + writeb(data, &i2c->regs->tx_fifo.data8); 151 + dev_dbg(i2c->adap.dev.parent, 152 + "%s(): write 0x%02x\n", __func__, data); 153 + } 154 + if (i2c->xmit_size < i2c->msg->len) { 155 + dev_dbg(i2c->adap.dev.parent, 156 + "%s(): TX IRQ enabled\n", __func__); 157 + writew(readw(&i2c->regs->tx_fifo.stat_ctrl) | FIFO_IRQEN, 158 + &i2c->regs->tx_fifo.stat_ctrl); 159 + } 160 + } 161 + 162 + static void netup_i2c_fifo_rx(struct netup_i2c *i2c) 163 + { 164 + u8 data; 165 + u32 fifo_size = readw(&i2c->regs->rx_fifo.stat_ctrl) & 0x3f; 166 + 167 + dev_dbg(i2c->adap.dev.parent, 168 + "%s(): RX fifo size %d\n", __func__, fifo_size); 169 + while (fifo_size--) { 170 + data = readb(&i2c->regs->rx_fifo.data8); 171 + if ((i2c->msg->flags & I2C_M_RD) != 0 && 172 + i2c->xmit_size < i2c->msg->len) { 173 + i2c->msg->buf[i2c->xmit_size++] = data; 174 + dev_dbg(i2c->adap.dev.parent, 175 + "%s(): read 0x%02x\n", __func__, data); 176 + } 177 + } 178 + if (i2c->xmit_size < 
i2c->msg->len) { 179 + dev_dbg(i2c->adap.dev.parent, 180 + "%s(): RX IRQ enabled\n", __func__); 181 + writew(readw(&i2c->regs->rx_fifo.stat_ctrl) | FIFO_IRQEN, 182 + &i2c->regs->rx_fifo.stat_ctrl); 183 + } 184 + } 185 + 186 + static void netup_i2c_start_xfer(struct netup_i2c *i2c) 187 + { 188 + u16 rdflag = ((i2c->msg->flags & I2C_M_RD) ? 1 : 0); 189 + u16 reg = readw(&i2c->regs->twi_ctrl0_stat); 190 + 191 + writew(TWI_IRQEN | reg, &i2c->regs->twi_ctrl0_stat); 192 + writew(i2c->msg->len, &i2c->regs->length); 193 + writew(TWI_TRANSFER | (i2c->msg->addr << 1) | rdflag, 194 + &i2c->regs->twi_addr_ctrl1); 195 + dev_dbg(i2c->adap.dev.parent, 196 + "%s(): length %d twi_addr_ctrl1 0x%x twi_ctrl0_stat 0x%x\n", 197 + __func__, readw(&i2c->regs->length), 198 + readw(&i2c->regs->twi_addr_ctrl1), 199 + readw(&i2c->regs->twi_ctrl0_stat)); 200 + i2c->state = STATE_WAIT; 201 + i2c->xmit_size = 0; 202 + if (!rdflag) 203 + netup_i2c_fifo_tx(i2c); 204 + else 205 + writew(FIFO_IRQEN | readw(&i2c->regs->rx_fifo.stat_ctrl), 206 + &i2c->regs->rx_fifo.stat_ctrl); 207 + } 208 + 209 + static int netup_i2c_xfer(struct i2c_adapter *adap, 210 + struct i2c_msg *msgs, int num) 211 + { 212 + unsigned long flags; 213 + int i, trans_done, res = num; 214 + struct netup_i2c *i2c = i2c_get_adapdata(adap); 215 + u16 reg; 216 + 217 + if (num <= 0) { 218 + dev_dbg(i2c->adap.dev.parent, 219 + "%s(): num == %d\n", __func__, num); 220 + return -EINVAL; 221 + } 222 + spin_lock_irqsave(&i2c->lock, flags); 223 + if (i2c->state != STATE_DONE) { 224 + dev_dbg(i2c->adap.dev.parent, 225 + "%s(): i2c->state == %d, resetting I2C\n", 226 + __func__, i2c->state); 227 + netup_i2c_reset(i2c); 228 + } 229 + dev_dbg(i2c->adap.dev.parent, "%s() num %d\n", __func__, num); 230 + for (i = 0; i < num; i++) { 231 + i2c->msg = &msgs[i]; 232 + netup_i2c_start_xfer(i2c); 233 + trans_done = 0; 234 + while (!trans_done) { 235 + spin_unlock_irqrestore(&i2c->lock, flags); 236 + if (wait_event_timeout(i2c->wq, 237 + i2c->state != 
STATE_WAIT, 238 + msecs_to_jiffies(NETUP_I2C_TIMEOUT))) { 239 + spin_lock_irqsave(&i2c->lock, flags); 240 + switch (i2c->state) { 241 + case STATE_WANT_READ: 242 + netup_i2c_fifo_rx(i2c); 243 + break; 244 + case STATE_WANT_WRITE: 245 + netup_i2c_fifo_tx(i2c); 246 + break; 247 + case STATE_DONE: 248 + if ((i2c->msg->flags & I2C_M_RD) != 0 && 249 + i2c->xmit_size != i2c->msg->len) 250 + netup_i2c_fifo_rx(i2c); 251 + dev_dbg(i2c->adap.dev.parent, 252 + "%s(): msg %d OK\n", 253 + __func__, i); 254 + trans_done = 1; 255 + break; 256 + case STATE_ERROR: 257 + res = -EIO; 258 + dev_dbg(i2c->adap.dev.parent, 259 + "%s(): error state\n", 260 + __func__); 261 + goto done; 262 + default: 263 + dev_dbg(i2c->adap.dev.parent, 264 + "%s(): invalid state %d\n", 265 + __func__, i2c->state); 266 + res = -EINVAL; 267 + goto done; 268 + } 269 + if (!trans_done) { 270 + i2c->state = STATE_WAIT; 271 + reg = readw( 272 + &i2c->regs->twi_ctrl0_stat); 273 + writew(TWI_IRQEN | reg, 274 + &i2c->regs->twi_ctrl0_stat); 275 + } 276 + spin_unlock_irqrestore(&i2c->lock, flags); 277 + } else { 278 + spin_lock_irqsave(&i2c->lock, flags); 279 + dev_dbg(i2c->adap.dev.parent, 280 + "%s(): wait timeout\n", __func__); 281 + res = -ETIMEDOUT; 282 + goto done; 283 + } 284 + spin_lock_irqsave(&i2c->lock, flags); 285 + } 286 + } 287 + done: 288 + spin_unlock_irqrestore(&i2c->lock, flags); 289 + dev_dbg(i2c->adap.dev.parent, "%s(): result %d\n", __func__, res); 290 + return res; 291 + } 292 + 293 + static u32 netup_i2c_func(struct i2c_adapter *adap) 294 + { 295 + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 296 + } 297 + 298 + static const struct i2c_algorithm netup_i2c_algorithm = { 299 + .master_xfer = netup_i2c_xfer, 300 + .functionality = netup_i2c_func, 301 + }; 302 + 303 + static struct i2c_adapter netup_i2c_adapter = { 304 + .owner = THIS_MODULE, 305 + .name = NETUP_UNIDVB_NAME, 306 + .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 307 + .algo = &netup_i2c_algorithm, 308 + }; 309 + 310 + static int 
netup_i2c_init(struct netup_unidvb_dev *ndev, int bus_num) 311 + { 312 + int ret; 313 + struct netup_i2c *i2c; 314 + 315 + if (bus_num < 0 || bus_num > 1) { 316 + dev_err(&ndev->pci_dev->dev, 317 + "%s(): invalid bus_num %d\n", __func__, bus_num); 318 + return -EINVAL; 319 + } 320 + i2c = &ndev->i2c[bus_num]; 321 + spin_lock_init(&i2c->lock); 322 + init_waitqueue_head(&i2c->wq); 323 + i2c->regs = (struct netup_i2c_regs *)(ndev->bmmio0 + 324 + (bus_num == 0 ? NETUP_I2C_BUS0_ADDR : NETUP_I2C_BUS1_ADDR)); 325 + netup_i2c_reset(i2c); 326 + i2c->adap = netup_i2c_adapter; 327 + i2c->adap.dev.parent = &ndev->pci_dev->dev; 328 + i2c_set_adapdata(&i2c->adap, i2c); 329 + ret = i2c_add_adapter(&i2c->adap); 330 + if (ret) { 331 + dev_err(&ndev->pci_dev->dev, 332 + "%s(): failed to add I2C adapter\n", __func__); 333 + return ret; 334 + } 335 + dev_info(&ndev->pci_dev->dev, 336 + "%s(): registered I2C bus %d at 0x%x\n", 337 + __func__, 338 + bus_num, (bus_num == 0 ? 339 + NETUP_I2C_BUS0_ADDR : 340 + NETUP_I2C_BUS1_ADDR)); 341 + return 0; 342 + } 343 + 344 + static void netup_i2c_remove(struct netup_unidvb_dev *ndev, int bus_num) 345 + { 346 + struct netup_i2c *i2c; 347 + 348 + if (bus_num < 0 || bus_num > 1) { 349 + dev_err(&ndev->pci_dev->dev, 350 + "%s(): invalid bus number %d\n", __func__, bus_num); 351 + return; 352 + } 353 + i2c = &ndev->i2c[bus_num]; 354 + netup_i2c_reset(i2c); 355 + /* remove adapter */ 356 + i2c_del_adapter(&i2c->adap); 357 + dev_info(&ndev->pci_dev->dev, 358 + "netup_i2c_remove: unregistered I2C bus %d\n", bus_num); 359 + } 360 + 361 + int netup_i2c_register(struct netup_unidvb_dev *ndev) 362 + { 363 + int ret; 364 + 365 + ret = netup_i2c_init(ndev, 0); 366 + if (ret) 367 + return ret; 368 + ret = netup_i2c_init(ndev, 1); 369 + if (ret) { 370 + netup_i2c_remove(ndev, 0); 371 + return ret; 372 + } 373 + return 0; 374 + } 375 + 376 + void netup_i2c_unregister(struct netup_unidvb_dev *ndev) 377 + { 378 + netup_i2c_remove(ndev, 0); 379 + 
netup_i2c_remove(ndev, 1); 380 + } 381 +
+252
drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
··· 1 + /* 2 + * netup_unidvb_spi.c 3 + * 4 + * Internal SPI driver for NetUP Universal Dual DVB-CI 5 + * 6 + * Copyright (C) 2014 NetUP Inc. 7 + * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru> 8 + * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + */ 20 + 21 + #include "netup_unidvb.h" 22 + #include <linux/spi/spi.h> 23 + #include <linux/spi/flash.h> 24 + #include <linux/mtd/partitions.h> 25 + #include <mtd/mtd-abi.h> 26 + 27 + #define NETUP_SPI_CTRL_IRQ 0x1000 28 + #define NETUP_SPI_CTRL_IMASK 0x2000 29 + #define NETUP_SPI_CTRL_START 0x8000 30 + #define NETUP_SPI_CTRL_LAST_CS 0x4000 31 + 32 + #define NETUP_SPI_TIMEOUT 6000 33 + 34 + enum netup_spi_state { 35 + SPI_STATE_START, 36 + SPI_STATE_DONE, 37 + }; 38 + 39 + struct netup_spi_regs { 40 + __u8 data[1024]; 41 + __le16 control_stat; 42 + __le16 clock_divider; 43 + } __packed __aligned(1); 44 + 45 + struct netup_spi { 46 + struct device *dev; 47 + struct spi_master *master; 48 + struct netup_spi_regs *regs; 49 + u8 __iomem *mmio; 50 + spinlock_t lock; 51 + wait_queue_head_t waitq; 52 + enum netup_spi_state state; 53 + }; 54 + 55 + static char netup_spi_name[64] = "fpga"; 56 + 57 + static struct mtd_partition netup_spi_flash_partitions = { 58 + .name = netup_spi_name, 59 + .size = 0x1000000, /* 16MB */ 60 + .offset = 0, 61 + .mask_flags = MTD_CAP_ROM 62 + }; 63 + 64 + static struct flash_platform_data spi_flash_data = { 65 + .name = "netup0_m25p128", 66 + .parts = 
&netup_spi_flash_partitions, 67 + .nr_parts = 1, 68 + }; 69 + 70 + static struct spi_board_info netup_spi_board = { 71 + .modalias = "m25p128", 72 + .max_speed_hz = 11000000, 73 + .chip_select = 0, 74 + .mode = SPI_MODE_0, 75 + .platform_data = &spi_flash_data, 76 + }; 77 + 78 + irqreturn_t netup_spi_interrupt(struct netup_spi *spi) 79 + { 80 + u16 reg; 81 + unsigned long flags; 82 + 83 + if (!spi) { 84 + dev_dbg(&spi->master->dev, 85 + "%s(): SPI not initialized\n", __func__); 86 + return IRQ_NONE; 87 + } 88 + spin_lock_irqsave(&spi->lock, flags); 89 + reg = readw(&spi->regs->control_stat); 90 + if (!(reg & NETUP_SPI_CTRL_IRQ)) { 91 + spin_unlock_irqrestore(&spi->lock, flags); 92 + dev_dbg(&spi->master->dev, 93 + "%s(): not mine interrupt\n", __func__); 94 + return IRQ_NONE; 95 + } 96 + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); 97 + reg = readw(&spi->regs->control_stat); 98 + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); 99 + spi->state = SPI_STATE_DONE; 100 + wake_up(&spi->waitq); 101 + spin_unlock_irqrestore(&spi->lock, flags); 102 + dev_dbg(&spi->master->dev, 103 + "%s(): SPI interrupt handled\n", __func__); 104 + return IRQ_HANDLED; 105 + } 106 + 107 + static int netup_spi_transfer(struct spi_master *master, 108 + struct spi_message *msg) 109 + { 110 + struct netup_spi *spi = spi_master_get_devdata(master); 111 + struct spi_transfer *t; 112 + int result = 0; 113 + u32 tr_size; 114 + 115 + /* reset CS */ 116 + writew(NETUP_SPI_CTRL_LAST_CS, &spi->regs->control_stat); 117 + writew(0, &spi->regs->control_stat); 118 + list_for_each_entry(t, &msg->transfers, transfer_list) { 119 + tr_size = t->len; 120 + while (tr_size) { 121 + u32 frag_offset = t->len - tr_size; 122 + u32 frag_size = (tr_size > sizeof(spi->regs->data)) ? 
123 + sizeof(spi->regs->data) : tr_size; 124 + int frag_last = 0; 125 + 126 + if (list_is_last(&t->transfer_list, 127 + &msg->transfers) && 128 + frag_offset + frag_size == t->len) { 129 + frag_last = 1; 130 + } 131 + if (t->tx_buf) { 132 + memcpy_toio(spi->regs->data, 133 + t->tx_buf + frag_offset, 134 + frag_size); 135 + } else { 136 + memset_io(spi->regs->data, 137 + 0, frag_size); 138 + } 139 + spi->state = SPI_STATE_START; 140 + writew((frag_size & 0x3ff) | 141 + NETUP_SPI_CTRL_IMASK | 142 + NETUP_SPI_CTRL_START | 143 + (frag_last ? NETUP_SPI_CTRL_LAST_CS : 0), 144 + &spi->regs->control_stat); 145 + dev_dbg(&spi->master->dev, 146 + "%s(): control_stat 0x%04x\n", 147 + __func__, readw(&spi->regs->control_stat)); 148 + wait_event_timeout(spi->waitq, 149 + spi->state != SPI_STATE_START, 150 + msecs_to_jiffies(NETUP_SPI_TIMEOUT)); 151 + if (spi->state == SPI_STATE_DONE) { 152 + if (t->rx_buf) { 153 + memcpy_fromio(t->rx_buf + frag_offset, 154 + spi->regs->data, frag_size); 155 + } 156 + } else { 157 + if (spi->state == SPI_STATE_START) { 158 + dev_dbg(&spi->master->dev, 159 + "%s(): transfer timeout\n", 160 + __func__); 161 + } else { 162 + dev_dbg(&spi->master->dev, 163 + "%s(): invalid state %d\n", 164 + __func__, spi->state); 165 + } 166 + result = -EIO; 167 + goto done; 168 + } 169 + tr_size -= frag_size; 170 + msg->actual_length += frag_size; 171 + } 172 + } 173 + done: 174 + msg->status = result; 175 + spi_finalize_current_message(master); 176 + return result; 177 + } 178 + 179 + static int netup_spi_setup(struct spi_device *spi) 180 + { 181 + return 0; 182 + } 183 + 184 + int netup_spi_init(struct netup_unidvb_dev *ndev) 185 + { 186 + struct spi_master *master; 187 + struct netup_spi *nspi; 188 + 189 + master = spi_alloc_master(&ndev->pci_dev->dev, 190 + sizeof(struct netup_spi)); 191 + if (!master) { 192 + dev_err(&ndev->pci_dev->dev, 193 + "%s(): unable to alloc SPI master\n", __func__); 194 + return -EINVAL; 195 + } 196 + nspi = 
spi_master_get_devdata(master); 197 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; 198 + master->bus_num = -1; 199 + master->num_chipselect = 1; 200 + master->transfer_one_message = netup_spi_transfer; 201 + master->setup = netup_spi_setup; 202 + spin_lock_init(&nspi->lock); 203 + init_waitqueue_head(&nspi->waitq); 204 + nspi->master = master; 205 + nspi->regs = (struct netup_spi_regs *)(ndev->bmmio0 + 0x4000); 206 + writew(2, &nspi->regs->clock_divider); 207 + writew(NETUP_UNIDVB_IRQ_SPI, ndev->bmmio0 + REG_IMASK_SET); 208 + ndev->spi = nspi; 209 + if (spi_register_master(master)) { 210 + ndev->spi = NULL; 211 + dev_err(&ndev->pci_dev->dev, 212 + "%s(): unable to register SPI bus\n", __func__); 213 + return -EINVAL; 214 + } 215 + snprintf(netup_spi_name, 216 + sizeof(netup_spi_name), 217 + "fpga_%02x:%02x.%01x", 218 + ndev->pci_bus, 219 + ndev->pci_slot, 220 + ndev->pci_func); 221 + if (!spi_new_device(master, &netup_spi_board)) { 222 + ndev->spi = NULL; 223 + dev_err(&ndev->pci_dev->dev, 224 + "%s(): unable to create SPI device\n", __func__); 225 + return -EINVAL; 226 + } 227 + dev_dbg(&ndev->pci_dev->dev, "%s(): SPI init OK\n", __func__); 228 + return 0; 229 + } 230 + 231 + void netup_spi_release(struct netup_unidvb_dev *ndev) 232 + { 233 + u16 reg; 234 + unsigned long flags; 235 + struct netup_spi *spi = ndev->spi; 236 + 237 + if (!spi) { 238 + dev_dbg(&spi->master->dev, 239 + "%s(): SPI not initialized\n", __func__); 240 + return; 241 + } 242 + spin_lock_irqsave(&spi->lock, flags); 243 + reg = readw(&spi->regs->control_stat); 244 + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); 245 + reg = readw(&spi->regs->control_stat); 246 + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); 247 + spin_unlock_irqrestore(&spi->lock, flags); 248 + spi_unregister_master(spi->master); 249 + ndev->spi = NULL; 250 + } 251 + 252 +