Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: kpc2000: Add DMA driver

Add Daktronics DMA driver. I've added the SPDX license identifiers, Kconfig
entry, and cleaned up as many of the warnings as I could.

The AIO support code will be removed in a future patch.

Signed-off-by: Matt Sickler <matt.sickler@daktronics.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Matt Sickler and committed by
Greg Kroah-Hartman
7df95299 52c4dfce

+1181
+11
drivers/staging/kpc2000/Kconfig
··· 44 44 45 45 If unsure, say N. 46 46 47 + config KPC2000_DMA 48 + tristate "Daktronics KPC DMA controller" 49 + depends on KPC2000 50 + help 51 + Say Y here if you wish to support the Daktronics DMA controller. 52 + 53 + To compile this driver as a module, choose M here: the module 54 + will be called kpc2000_dma 55 + 56 + If unsure, say N. 57 +
+1
drivers/staging/kpc2000/Makefile
··· 3 3 obj-$(CONFIG_KPC2000) += kpc2000/ 4 4 obj-$(CONFIG_KPC2000_I2C) += kpc_i2c/ 5 5 obj-$(CONFIG_KPC2000_SPI) += kpc_spi/ 6 + obj-$(CONFIG_KPC2000_DMA) += kpc_dma/
+6
drivers/staging/kpc2000/kpc_dma/Makefile
# SPDX-License-Identifier: GPL-2.0

# BUGFIX: was "obj-m := kpc_dma.o", which unconditionally builds the module
# and ignores the CONFIG_KPC2000_DMA tristate added in Kconfig.
obj-$(CONFIG_KPC2000_DMA)	+= kpc_dma.o

kpc_dma-objs += dma.o
kpc_dma-objs += fileops.o
kpc_dma-objs += kpc_dma_driver.o
+264
drivers/staging/kpc2000/kpc_dma/dma.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #include <linux/init.h> 3 + #include <linux/module.h> 4 + #include <linux/types.h> 5 + #include <asm/io.h> 6 + #include <linux/export.h> 7 + #include <linux/slab.h> 8 + #include <linux/platform_device.h> 9 + #include <linux/fs.h> 10 + #include <linux/rwsem.h> 11 + #include "kpc_dma_driver.h" 12 + 13 + /********** IRQ Handlers **********/ 14 + static 15 + irqreturn_t ndd_irq_handler(int irq, void *dev_id) 16 + { 17 + struct kpc_dma_device *ldev = (struct kpc_dma_device*)dev_id; 18 + 19 + if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) || (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev))) 20 + schedule_work(&ldev->irq_work); 21 + 22 + return IRQ_HANDLED; 23 + } 24 + 25 + static 26 + void ndd_irq_worker(struct work_struct *ws) 27 + { 28 + struct kpc_dma_descriptor *cur; 29 + struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work); 30 + lock_engine(eng); 31 + 32 + if (GetEngineCompletePtr(eng) == 0) 33 + goto out; 34 + 35 + if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng)) 36 + goto out; 37 + 38 + cur = eng->desc_completed; 39 + do { 40 + cur = cur->Next; 41 + dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd); 42 + BUG_ON(cur == eng->desc_next); // Ordering failure. 
43 + 44 + if (cur->DescControlFlags & DMA_DESC_CTL_SOP){ 45 + eng->accumulated_bytes = 0; 46 + eng->accumulated_flags = 0; 47 + } 48 + 49 + eng->accumulated_bytes += cur->DescByteCount; 50 + if (cur->DescStatusFlags & DMA_DESC_STS_ERROR) 51 + eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR; 52 + 53 + if (cur->DescStatusFlags & DMA_DESC_STS_SHORT) 54 + eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT; 55 + 56 + if (cur->DescControlFlags & DMA_DESC_CTL_EOP){ 57 + if (cur->acd) 58 + transfer_complete_cb(cur->acd, eng->accumulated_bytes, eng->accumulated_flags | ACD_FLAG_DONE); 59 + } 60 + 61 + eng->desc_completed = cur; 62 + } while (cur->MyDMAAddr != GetEngineCompletePtr(eng)); 63 + 64 + out: 65 + SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0); 66 + 67 + unlock_engine(eng); 68 + } 69 + 70 + 71 + /********** DMA Engine Init/Teardown **********/ 72 + void start_dma_engine(struct kpc_dma_device *eng) 73 + { 74 + eng->desc_next = eng->desc_pool_first; 75 + eng->desc_completed = eng->desc_pool_last; 76 + 77 + // Setup the engine pointer registers 78 + SetEngineNextPtr(eng, eng->desc_pool_first); 79 + SetEngineSWPtr(eng, eng->desc_pool_first); 80 + ClearEngineCompletePtr(eng); 81 + 82 + WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE); 83 + } 84 + 85 + int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt) 86 + { 87 + u32 caps; 88 + struct kpc_dma_descriptor * cur; 89 + struct kpc_dma_descriptor * next; 90 + dma_addr_t next_handle; 91 + dma_addr_t head_handle; 92 + unsigned int i; 93 + int rv; 94 + dev_dbg(&eng->pldev->dev, "Setting up DMA engine [%p]\n", eng); 95 + 96 + caps = GetEngineCapabilities(eng); 97 + 98 + if (WARN(!(caps & ENG_CAP_PRESENT), "setup_dma_engine() called for DMA Engine at %p which isn't present in hardware!\n", eng)) 99 + return -ENXIO; 100 + 101 + if (caps & ENG_CAP_DIRECTION){ 102 + eng->dir = DMA_FROM_DEVICE; 103 + } else { 104 + eng->dir = DMA_TO_DEVICE; 105 + } 106 + 107 + eng->desc_pool_cnt = desc_cnt; 108 + 
eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096); 109 + 110 + eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle); 111 + if (!eng->desc_pool_first){ 112 + dev_err(&eng->pldev->dev, "setup_dma_engine: couldn't allocate desc_pool_first!\n"); 113 + dma_pool_destroy(eng->desc_pool); 114 + return -ENOMEM; 115 + } 116 + 117 + eng->desc_pool_first->MyDMAAddr = head_handle; 118 + clear_desc(eng->desc_pool_first); 119 + 120 + cur = eng->desc_pool_first; 121 + for (i = 1 ; i < eng->desc_pool_cnt ; i++){ 122 + next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle); 123 + if (next == NULL) 124 + goto done_alloc; 125 + 126 + clear_desc(next); 127 + next->MyDMAAddr = next_handle; 128 + 129 + cur->DescNextDescPtr = next_handle; 130 + cur->Next = next; 131 + cur = next; 132 + } 133 + 134 + done_alloc: 135 + // Link the last descriptor back to the first, so it's a circular linked list 136 + cur->Next = eng->desc_pool_first; 137 + cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr; 138 + 139 + eng->desc_pool_last = cur; 140 + eng->desc_completed = eng->desc_pool_last; 141 + 142 + // Setup work queue 143 + INIT_WORK(&eng->irq_work, ndd_irq_worker); 144 + 145 + // Grab IRQ line 146 + rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng); 147 + if (rv){ 148 + dev_err(&eng->pldev->dev, "setup_dma_engine: failed to request_irq: %d\n", rv); 149 + return rv; 150 + } 151 + 152 + // Turn on the engine! 
153 + start_dma_engine(eng); 154 + unlock_engine(eng); 155 + 156 + return 0; 157 + } 158 + 159 + void stop_dma_engine(struct kpc_dma_device *eng) 160 + { 161 + unsigned long timeout; 162 + dev_dbg(&eng->pldev->dev, "Destroying DMA engine [%p]\n", eng); 163 + 164 + // Disable the descriptor engine 165 + WriteEngineControl(eng, 0); 166 + 167 + // Wait for descriptor engine to finish current operaion 168 + timeout = jiffies + (HZ / 2); 169 + while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING){ 170 + if (time_after(jiffies, timeout)){ 171 + dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n"); 172 + break; 173 + } 174 + } 175 + 176 + // Request a reset 177 + WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST); 178 + 179 + // Wait for reset request to be processed 180 + timeout = jiffies + (HZ / 2); 181 + while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)){ 182 + if (time_after(jiffies, timeout)){ 183 + dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n"); 184 + break; 185 + } 186 + } 187 + 188 + // Request a reset 189 + WriteEngineControl(eng, ENG_CTL_DMA_RESET); 190 + 191 + // And wait for reset to complete 192 + timeout = jiffies + (HZ / 2); 193 + while (GetEngineControl(eng) & ENG_CTL_DMA_RESET){ 194 + if (time_after(jiffies, timeout)){ 195 + dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n"); 196 + break; 197 + } 198 + } 199 + 200 + // Clear any persistent bits just to make sure there is no residue from the reset 201 + SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE | ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR | ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END | ENG_CTL_DMA_WAITING_PERSIST), 0); 202 + 203 + // Reset performance counters 204 + 205 + // Completely disable the engine 206 + WriteEngineControl(eng, 0); 207 + } 208 + 209 + void destroy_dma_engine(struct kpc_dma_device *eng) 210 + { 211 + struct kpc_dma_descriptor * cur; 212 + dma_addr_t cur_handle; 213 + unsigned int 
i; 214 + 215 + stop_dma_engine(eng); 216 + 217 + cur = eng->desc_pool_first; 218 + cur_handle = eng->desc_pool_first->MyDMAAddr; 219 + 220 + for (i = 0 ; i < eng->desc_pool_cnt ; i++){ 221 + struct kpc_dma_descriptor *next = cur->Next; 222 + dma_addr_t next_handle = cur->DescNextDescPtr; 223 + dma_pool_free(eng->desc_pool, cur, cur_handle); 224 + cur_handle = next_handle; 225 + cur = next; 226 + } 227 + 228 + dma_pool_destroy(eng->desc_pool); 229 + 230 + free_irq(eng->irq, eng); 231 + } 232 + 233 + 234 + 235 + /********** Helper Functions **********/ 236 + int count_descriptors_available(struct kpc_dma_device *eng) 237 + { 238 + u32 count = 0; 239 + struct kpc_dma_descriptor *cur = eng->desc_next; 240 + while (cur != eng->desc_completed){ 241 + BUG_ON(cur == NULL); 242 + count++; 243 + cur = cur->Next; 244 + } 245 + return count; 246 + } 247 + 248 + void clear_desc(struct kpc_dma_descriptor *desc) 249 + { 250 + if (desc == NULL) 251 + return; 252 + desc->DescByteCount = 0; 253 + desc->DescStatusErrorFlags = 0; 254 + desc->DescStatusFlags = 0; 255 + desc->DescUserControlLS = 0; 256 + desc->DescUserControlMS = 0; 257 + desc->DescCardAddrLS = 0; 258 + desc->DescBufferByteCount = 0; 259 + desc->DescCardAddrMS = 0; 260 + desc->DescControlFlags = 0; 261 + desc->DescSystemAddrLS = 0; 262 + desc->DescSystemAddrMS = 0; 263 + desc->acd = NULL; 264 + }
+420
drivers/staging/kpc2000/kpc_dma/fileops.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #include <linux/module.h> 3 + #include <linux/init.h> 4 + #include <linux/mm.h> 5 + #include <linux/kernel.h> /* printk() */ 6 + #include <linux/slab.h> /* kmalloc() */ 7 + #include <linux/fs.h> /* everything... */ 8 + #include <linux/errno.h> /* error codes */ 9 + #include <linux/types.h> /* size_t */ 10 + #include <linux/cdev.h> 11 + #include <asm/uaccess.h> /* copy_*_user */ 12 + #include <linux/aio.h> /* aio stuff */ 13 + #include <linux/highmem.h> 14 + #include <linux/pagemap.h> 15 + #include "kpc_dma_driver.h" 16 + #include "uapi.h" 17 + 18 + /********** Helper Functions **********/ 19 + static inline 20 + unsigned int count_pages(unsigned long iov_base, size_t iov_len) 21 + { 22 + unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT; 23 + unsigned long last = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT; 24 + return last - first + 1; 25 + } 26 + 27 + static inline 28 + unsigned int count_parts_for_sge(struct scatterlist *sg) 29 + { 30 + unsigned int sg_length = sg_dma_len(sg); 31 + sg_length += (0x80000-1); 32 + return (sg_length / 0x80000); 33 + } 34 + 35 + /********** Transfer Helpers **********/ 36 + static 37 + int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned long iov_base, size_t iov_len) 38 + { 39 + unsigned int i = 0; 40 + long rv = 0; 41 + struct kpc_dma_device *ldev; 42 + struct aio_cb_data *acd; 43 + DECLARE_COMPLETION_ONSTACK(done); 44 + u32 desc_needed = 0; 45 + struct scatterlist *sg; 46 + u32 num_descrs_avail; 47 + struct kpc_dma_descriptor *desc; 48 + unsigned int pcnt; 49 + unsigned int p; 50 + u64 card_addr; 51 + u64 dma_addr; 52 + u64 user_ctl; 53 + 54 + BUG_ON(priv == NULL); 55 + ldev = priv->ldev; 56 + BUG_ON(ldev == NULL); 57 + 58 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer(priv = [%p], kcb = [%p], iov_base = [%p], iov_len = %ld) ldev = [%p]\n", priv, kcb, (void*)iov_base, iov_len, ldev); 59 + 60 + acd = (struct aio_cb_data *) 
kzalloc(sizeof(struct aio_cb_data), GFP_KERNEL); 61 + if (!acd){ 62 + dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the aio data\n"); 63 + return -ENOMEM; 64 + } 65 + memset(acd, 0x66, sizeof(struct aio_cb_data)); 66 + 67 + acd->priv = priv; 68 + acd->ldev = priv->ldev; 69 + acd->cpl = &done; 70 + acd->flags = 0; 71 + acd->kcb = kcb; 72 + acd->len = iov_len; 73 + acd->page_count = count_pages(iov_base, iov_len); 74 + 75 + // Allocate an array of page pointers 76 + acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL); 77 + if (!acd->user_pages){ 78 + dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the page pointers\n"); 79 + rv = -ENOMEM; 80 + goto err_alloc_userpages; 81 + } 82 + 83 + // Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist) 84 + down_read(&current->mm->mmap_sem); /* get memory map semaphore */ 85 + rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL); 86 + up_read(&current->mm->mmap_sem); /* release the semaphore */ 87 + if (rv != acd->page_count){ 88 + dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv); 89 + goto err_get_user_pages; 90 + } 91 + 92 + // Allocate and setup the sg_table (scatterlist entries) 93 + rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL); 94 + if (rv){ 95 + dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv); 96 + goto err_alloc_sg_table; 97 + } 98 + 99 + // Setup the DMA mapping for all the sg entries 100 + acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir); 101 + if (acd->mapped_entry_count <= 0){ 102 + dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count); 103 + goto err_dma_map_sg; 104 + } 105 + 106 + // Calculate how many descriptors are actually needed for this transfer. 
107 + for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){ 108 + desc_needed += count_parts_for_sge(sg); 109 + } 110 + 111 + lock_engine(ldev); 112 + 113 + // Figoure out how many descriptors are available and return an error if there aren't enough 114 + num_descrs_avail = count_descriptors_available(ldev); 115 + dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail); 116 + if (desc_needed >= ldev->desc_pool_cnt){ 117 + dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail); 118 + rv = -EAGAIN; 119 + unlock_engine(ldev); 120 + goto err_descr_too_many; 121 + } 122 + if (desc_needed > num_descrs_avail){ 123 + dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail); 124 + rv = -EMSGSIZE; 125 + unlock_engine(ldev); 126 + goto err_descr_too_many; 127 + } 128 + 129 + // Loop through all the sg table entries and fill out a descriptor for each one. 
130 + desc = ldev->desc_next; 131 + card_addr = acd->priv->card_addr; 132 + for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){ 133 + pcnt = count_parts_for_sge(sg); 134 + for (p = 0 ; p < pcnt ; p++){ 135 + // Fill out the descriptor 136 + BUG_ON(desc == NULL); 137 + clear_desc(desc); 138 + if (p != pcnt-1){ 139 + desc->DescByteCount = 0x80000; 140 + } else { 141 + desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000); 142 + } 143 + desc->DescBufferByteCount = desc->DescByteCount; 144 + 145 + desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR; 146 + if (i == 0 && p == 0) 147 + desc->DescControlFlags |= DMA_DESC_CTL_SOP; 148 + if (i == acd->mapped_entry_count-1 && p == pcnt-1) 149 + desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE; 150 + 151 + desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF); 152 + desc->DescCardAddrMS = (card_addr >> 32) & 0xF; 153 + card_addr += desc->DescByteCount; 154 + 155 + dma_addr = sg_dma_address(sg) + (p * 0x80000); 156 + desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0; 157 + desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32; 158 + 159 + user_ctl = acd->priv->user_ctl; 160 + if (i == acd->mapped_entry_count-1 && p == pcnt-1){ 161 + user_ctl = acd->priv->user_ctl_last; 162 + } 163 + desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0; 164 + desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32; 165 + 166 + if (i == acd->mapped_entry_count-1 && p == pcnt-1) 167 + desc->acd = acd; 168 + 169 + dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd); 170 + 171 + ldev->desc_next = desc->Next; 172 + desc = desc->Next; 173 + } 174 + } 175 + 176 + // Send the filled descriptors off to the hardware to process! 
177 + SetEngineSWPtr(ldev, ldev->desc_next); 178 + 179 + unlock_engine(ldev); 180 + 181 + // If this is a synchronous kiocb, we need to put the calling process to sleep until the transfer is complete 182 + if (kcb == NULL || is_sync_kiocb(kcb)){ 183 + rv = wait_for_completion_interruptible(&done); 184 + // If the user aborted (rv == -ERESTARTSYS), we're no longer responsible for cleaning up the acd 185 + if (rv == -ERESTARTSYS){ 186 + acd->cpl = NULL; 187 + } 188 + if (rv == 0){ 189 + rv = acd->len; 190 + kfree(acd); 191 + } 192 + return rv; 193 + } 194 + 195 + return -EIOCBQUEUED; 196 + 197 + err_descr_too_many: 198 + unlock_engine(ldev); 199 + dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir); 200 + sg_free_table(&acd->sgt); 201 + err_dma_map_sg: 202 + err_alloc_sg_table: 203 + for (i = 0 ; i < acd->page_count ; i++){ 204 + put_page(acd->user_pages[i]); 205 + } 206 + err_get_user_pages: 207 + kfree(acd->user_pages); 208 + err_alloc_userpages: 209 + kfree(acd); 210 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer returning with error %ld\n", rv); 211 + return rv; 212 + } 213 + 214 + void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags) 215 + { 216 + unsigned int i; 217 + 218 + BUG_ON(acd == NULL); 219 + BUG_ON(acd->user_pages == NULL); 220 + BUG_ON(acd->sgt.sgl == NULL); 221 + BUG_ON(acd->ldev == NULL); 222 + BUG_ON(acd->ldev->pldev == NULL); 223 + 224 + dev_dbg(&acd->ldev->pldev->dev, "transfer_complete_cb(acd = [%p])\n", acd); 225 + 226 + for (i = 0 ; i < acd->page_count ; i++){ 227 + if (!PageReserved(acd->user_pages[i])){ 228 + set_page_dirty(acd->user_pages[i]); 229 + } 230 + } 231 + 232 + dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir); 233 + 234 + for (i = 0 ; i < acd->page_count ; i++){ 235 + put_page(acd->user_pages[i]); 236 + } 237 + 238 + sg_free_table(&acd->sgt); 239 + 240 + kfree(acd->user_pages); 241 + 242 + acd->flags = flags; 243 + 244 + if (acd->kcb == NULL || 
is_sync_kiocb(acd->kcb)){ 245 + if (acd->cpl){ 246 + complete(acd->cpl); 247 + } else { 248 + // There's no completion, so we're responsible for cleaning up the acd 249 + kfree(acd); 250 + } 251 + } else { 252 + #ifdef CONFIG_KPC_DMA_AIO 253 + aio_complete(acd->kcb, acd->len, acd->flags); 254 + #endif 255 + kfree(acd); 256 + } 257 + } 258 + 259 + /********** Fileops **********/ 260 + static 261 + int kpc_dma_open(struct inode *inode, struct file *filp) 262 + { 263 + struct dev_private_data *priv; 264 + struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode)); 265 + if (ldev == NULL) 266 + return -ENODEV; 267 + 268 + if (! atomic_dec_and_test(&ldev->open_count)){ 269 + atomic_inc(&ldev->open_count); 270 + return -EBUSY; /* already open */ 271 + } 272 + 273 + priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL); 274 + if (!priv) 275 + return -ENOMEM; 276 + 277 + priv->ldev = ldev; 278 + filp->private_data = priv; 279 + 280 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_open(inode = [%p], filp = [%p]) priv = [%p] ldev = [%p]\n", inode, filp, priv, priv->ldev); 281 + return 0; 282 + } 283 + 284 + static 285 + int kpc_dma_close(struct inode *inode, struct file *filp) 286 + { 287 + struct kpc_dma_descriptor *cur; 288 + struct dev_private_data *priv = (struct dev_private_data *)filp->private_data; 289 + struct kpc_dma_device *eng = priv->ldev; 290 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_close(inode = [%p], filp = [%p]) priv = [%p], ldev = [%p]\n", inode, filp, priv, priv->ldev); 291 + 292 + lock_engine(eng); 293 + 294 + stop_dma_engine(eng); 295 + 296 + cur = eng->desc_completed->Next; 297 + while (cur != eng->desc_next){ 298 + dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd); 299 + if (cur->DescControlFlags & DMA_DESC_CTL_EOP){ 300 + if (cur->acd) 301 + transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT); 302 + } 303 + 304 + clear_desc(cur); 305 + eng->desc_completed = cur; 306 + 307 + cur = cur->Next; 308 + } 309 + 310 + 
start_dma_engine(eng); 311 + 312 + unlock_engine(eng); 313 + 314 + atomic_inc(&priv->ldev->open_count); /* release the device */ 315 + kfree(priv); 316 + return 0; 317 + } 318 + 319 + #ifdef CONFIG_KPC_DMA_AIO 320 + static 321 + int kpc_dma_aio_cancel(struct kiocb *kcb) 322 + { 323 + struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data; 324 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_cancel(kcb = [%p]) priv = [%p], ldev = [%p]\n", kcb, priv, priv->ldev); 325 + return 0; 326 + } 327 + 328 + static 329 + ssize_t kpc_dma_aio_read(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos) 330 + { 331 + struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data; 332 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_read(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev); 333 + 334 + if (priv->ldev->dir != DMA_FROM_DEVICE) 335 + return -EMEDIUMTYPE; 336 + 337 + if (iov_count != 1){ 338 + dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_read() called with iov_count > 1!\n"); 339 + return -EFAULT; 340 + } 341 + 342 + if (!is_sync_kiocb(kcb)) 343 + kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel); 344 + return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len); 345 + } 346 + 347 + static 348 + ssize_t kpc_dma_aio_write(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos) 349 + { 350 + struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data; 351 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_write(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev); 352 + 353 + if (priv->ldev->dir != DMA_TO_DEVICE) 354 + return -EMEDIUMTYPE; 355 + 356 + if (iov_count != 1){ 357 + dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_write() called with iov_count > 1!\n"); 358 + return -EFAULT; 359 + 
} 360 + 361 + if (!is_sync_kiocb(kcb)) 362 + kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel); 363 + return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len); 364 + } 365 + #endif 366 + 367 + static 368 + ssize_t kpc_dma_read( struct file *filp, char __user *user_buf, size_t count, loff_t *ppos) 369 + { 370 + struct dev_private_data *priv = (struct dev_private_data *)filp->private_data; 371 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_read(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev); 372 + 373 + if (priv->ldev->dir != DMA_FROM_DEVICE) 374 + return -EMEDIUMTYPE; 375 + 376 + return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count); 377 + } 378 + 379 + static 380 + ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) 381 + { 382 + struct dev_private_data *priv = (struct dev_private_data *)filp->private_data; 383 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_write(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev); 384 + 385 + if (priv->ldev->dir != DMA_TO_DEVICE) 386 + return -EMEDIUMTYPE; 387 + 388 + return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count); 389 + } 390 + 391 + static 392 + long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param) 393 + { 394 + struct dev_private_data *priv = (struct dev_private_data *)filp->private_data; 395 + dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_ioctl(filp = [%p], ioctl_num = 0x%x, ioctl_param = 0x%lx) priv = [%p], ldev = [%p]\n", filp, ioctl_num, ioctl_param, priv, priv->ldev); 396 + 397 + switch (ioctl_num){ 398 + case KND_IOCTL_SET_CARD_ADDR: priv->card_addr = ioctl_param; return priv->card_addr; 399 + case KND_IOCTL_SET_USER_CTL: priv->user_ctl = ioctl_param; return priv->user_ctl; 400 + case 
KND_IOCTL_SET_USER_CTL_LAST: priv->user_ctl_last = ioctl_param; return priv->user_ctl_last; 401 + case KND_IOCTL_GET_USER_STS: return priv->user_sts; 402 + } 403 + 404 + return -ENOTTY; 405 + } 406 + 407 + 408 + struct file_operations kpc_dma_fops = { 409 + .owner = THIS_MODULE, 410 + .open = kpc_dma_open, 411 + .release = kpc_dma_close, 412 + .read = kpc_dma_read, 413 + .write = kpc_dma_write, 414 + #ifdef CONFIG_KPC_DMA_AIO 415 + .aio_read = kpc_dma_aio_read, 416 + .aio_write = kpc_dma_aio_write, 417 + #endif 418 + .unlocked_ioctl = kpc_dma_ioctl, 419 + }; 420 +
+248
drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #include <linux/init.h> 3 + #include <linux/module.h> 4 + #include <linux/types.h> 5 + #include <asm/io.h> 6 + #include <linux/export.h> 7 + #include <linux/slab.h> 8 + #include <linux/platform_device.h> 9 + #include <linux/fs.h> 10 + #include <linux/rwsem.h> 11 + #include "kpc_dma_driver.h" 12 + 13 + MODULE_LICENSE("GPL"); 14 + MODULE_AUTHOR("Matt.Sickler@daktronics.com"); 15 + 16 + #define KPC_DMA_CHAR_MAJOR UNNAMED_MAJOR 17 + #define KPC_DMA_NUM_MINORS 1 << MINORBITS 18 + static DEFINE_MUTEX(kpc_dma_mtx); 19 + static int assigned_major_num; 20 + static LIST_HEAD(kpc_dma_list); 21 + 22 + 23 + /********** kpc_dma_list list management **********/ 24 + struct kpc_dma_device * kpc_dma_lookup_device(int minor) 25 + { 26 + struct kpc_dma_device *c; 27 + mutex_lock(&kpc_dma_mtx); 28 + list_for_each_entry(c, &kpc_dma_list, list) { 29 + if (c->pldev->id == minor) { 30 + goto out; 31 + } 32 + } 33 + c = NULL; // not-found case 34 + out: 35 + mutex_unlock(&kpc_dma_mtx); 36 + return c; 37 + } 38 + 39 + void kpc_dma_add_device(struct kpc_dma_device * ldev) 40 + { 41 + mutex_lock(&kpc_dma_mtx); 42 + list_add(&ldev->list, &kpc_dma_list); 43 + mutex_unlock(&kpc_dma_mtx); 44 + } 45 + 46 + void kpc_dma_del_device(struct kpc_dma_device * ldev) 47 + { 48 + mutex_lock(&kpc_dma_mtx); 49 + list_del(&ldev->list); 50 + mutex_unlock(&kpc_dma_mtx); 51 + } 52 + 53 + /********** SysFS Attributes **********/ 54 + static ssize_t show_engine_regs(struct device *dev, struct device_attribute *attr, char *buf) 55 + { 56 + struct kpc_dma_device *ldev; 57 + struct platform_device *pldev = to_platform_device(dev); 58 + if (!pldev) return 0; 59 + ldev = platform_get_drvdata(pldev); 60 + if (!ldev) return 0; 61 + 62 + return scnprintf(buf, PAGE_SIZE, 63 + "EngineControlStatus = 0x%08x\n" 64 + "RegNextDescPtr = 0x%08x\n" 65 + "RegSWDescPtr = 0x%08x\n" 66 + "RegCompletedDescPtr = 0x%08x\n" 67 + "desc_pool_first = %p\n" 68 + "desc_pool_last = %p\n" 69 + 
"desc_next = %p\n" 70 + "desc_completed = %p\n", 71 + readl(ldev->eng_regs + 1), 72 + readl(ldev->eng_regs + 2), 73 + readl(ldev->eng_regs + 3), 74 + readl(ldev->eng_regs + 4), 75 + ldev->desc_pool_first, 76 + ldev->desc_pool_last, 77 + ldev->desc_next, 78 + ldev->desc_completed 79 + ); 80 + } 81 + DEVICE_ATTR(engine_regs, 0444, show_engine_regs, NULL); 82 + 83 + static const struct attribute * ndd_attr_list[] = { 84 + &dev_attr_engine_regs.attr, 85 + NULL, 86 + }; 87 + 88 + struct class *kpc_dma_class; 89 + 90 + 91 + /********** Platform Driver Functions **********/ 92 + static 93 + int kpc_dma_probe(struct platform_device *pldev) 94 + { 95 + struct resource *r = NULL; 96 + int rv = 0; 97 + dev_t dev; 98 + 99 + struct kpc_dma_device *ldev = kzalloc(sizeof(struct kpc_dma_device), GFP_KERNEL); 100 + if (!ldev){ 101 + dev_err(&pldev->dev, "kpc_dma_probe: unable to kzalloc space for kpc_dma_device\n"); 102 + rv = -ENOMEM; 103 + goto err_rv; 104 + } 105 + 106 + dev_dbg(&pldev->dev, "kpc_dma_probe(pldev = [%p]) ldev = [%p]\n", pldev, ldev); 107 + 108 + INIT_LIST_HEAD(&ldev->list); 109 + 110 + ldev->pldev = pldev; 111 + platform_set_drvdata(pldev, ldev); 112 + atomic_set(&ldev->open_count, 1); 113 + 114 + mutex_init(&ldev->sem); 115 + lock_engine(ldev); 116 + 117 + // Get Engine regs resource 118 + r = platform_get_resource(pldev, IORESOURCE_MEM, 0); 119 + if (!r){ 120 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the engine regs resource!\n"); 121 + rv = -ENXIO; 122 + goto err_kfree; 123 + } 124 + ldev->eng_regs = ioremap_nocache(r->start, resource_size(r)); 125 + if (!ldev->eng_regs){ 126 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to ioremap engine regs!\n"); 127 + rv = -ENXIO; 128 + goto err_kfree; 129 + } 130 + 131 + r = platform_get_resource(pldev, IORESOURCE_IRQ, 0); 132 + if (!r){ 133 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the IRQ resource!\n"); 134 + rv = -ENXIO; 135 + goto err_kfree; 136 + } 137 + ldev->irq = r->start; 138 + 
139 + // Setup miscdev struct 140 + dev = MKDEV(assigned_major_num, pldev->id); 141 + ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev, "kpc_dma%d", pldev->id); 142 + if (IS_ERR(ldev->kpc_dma_dev)){ 143 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: device_create failed: %d\n", rv); 144 + goto err_kfree; 145 + } 146 + 147 + // Setup the DMA engine 148 + rv = setup_dma_engine(ldev, 30); 149 + if (rv){ 150 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to setup_dma_engine: %d\n", rv); 151 + goto err_misc_dereg; 152 + } 153 + 154 + // Setup the sysfs files 155 + rv = sysfs_create_files(&(ldev->pldev->dev.kobj), ndd_attr_list); 156 + if (rv){ 157 + dev_err(&ldev->pldev->dev, "kpc_dma_probe: Failed to add sysfs files: %d\n", rv); 158 + goto err_destroy_eng; 159 + } 160 + 161 + kpc_dma_add_device(ldev); 162 + 163 + return 0; 164 + 165 + err_destroy_eng: 166 + destroy_dma_engine(ldev); 167 + err_misc_dereg: 168 + device_destroy(kpc_dma_class, dev); 169 + err_kfree: 170 + kfree(ldev); 171 + err_rv: 172 + return rv; 173 + } 174 + 175 + static 176 + int kpc_dma_remove(struct platform_device *pldev) 177 + { 178 + struct kpc_dma_device *ldev = platform_get_drvdata(pldev); 179 + if (!ldev) 180 + return -ENXIO; 181 + 182 + dev_dbg(&ldev->pldev->dev, "kpc_dma_remove(pldev = [%p]) ldev = [%p]\n", pldev, ldev); 183 + 184 + lock_engine(ldev); 185 + sysfs_remove_files(&(ldev->pldev->dev.kobj), ndd_attr_list); 186 + destroy_dma_engine(ldev); 187 + kpc_dma_del_device(ldev); 188 + device_destroy(kpc_dma_class, MKDEV(assigned_major_num, ldev->pldev->id)); 189 + kfree(ldev); 190 + 191 + return 0; 192 + } 193 + 194 + 195 + /********** Driver Functions **********/ 196 + struct platform_driver kpc_dma_plat_driver_i = { 197 + .probe = kpc_dma_probe, 198 + .remove = kpc_dma_remove, 199 + .driver = { 200 + .name = KP_DRIVER_NAME_DMA_CONTROLLER, 201 + .owner = THIS_MODULE, 202 + }, 203 + }; 204 + 205 + static 206 + int __init kpc_dma_driver_init(void) 207 + { 208 + int 
err; 209 + 210 + err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma", &kpc_dma_fops); 211 + if (err < 0){ 212 + pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n", KPC_DMA_CHAR_MAJOR, err); 213 + goto fail_chrdev_register; 214 + } 215 + assigned_major_num = err; 216 + 217 + kpc_dma_class = class_create(THIS_MODULE, "kpc_dma"); 218 + err = PTR_ERR(kpc_dma_class); 219 + if (IS_ERR(kpc_dma_class)){ 220 + pr_err("Can't create class kpc_dma (err = %d)\n", err); 221 + goto fail_class_create; 222 + } 223 + 224 + err = platform_driver_register(&kpc_dma_plat_driver_i); 225 + if (err){ 226 + pr_err("Can't register platform driver for kpc_dma (err = %d)\n", err); 227 + goto fail_platdriver_register; 228 + } 229 + 230 + return err; 231 + 232 + fail_platdriver_register: 233 + class_destroy(kpc_dma_class); 234 + fail_class_create: 235 + __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma"); 236 + fail_chrdev_register: 237 + return err; 238 + } 239 + module_init(kpc_dma_driver_init); 240 + 241 + static 242 + void __exit kpc_dma_driver_exit(void) 243 + { 244 + platform_driver_unregister(&kpc_dma_plat_driver_i); 245 + class_destroy(kpc_dma_class); 246 + __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma"); 247 + } 248 + module_exit(kpc_dma_driver_exit);
+220
drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #ifndef KPC_DMA_DRIVER_H 3 + #define KPC_DMA_DRIVER_H 4 + #include <linux/platform_device.h> 5 + #include <linux/cdev.h> 6 + #include <linux/kfifo.h> 7 + #include <linux/list.h> 8 + #include <linux/spinlock.h> 9 + #include <linux/sched.h> 10 + #include <linux/miscdevice.h> 11 + #include <linux/rwsem.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/dmapool.h> 14 + #include <linux/pci.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/workqueue.h> 17 + #include <linux/aio.h> 18 + #include <linux/bitops.h> 19 + #include "../kpc.h" 20 + 21 + 22 + struct kp2000_device; 23 + struct kpc_dma_device { 24 + struct list_head list; 25 + struct platform_device *pldev; 26 + u32 __iomem *eng_regs; 27 + struct device *kpc_dma_dev; 28 + struct kobject kobj; 29 + char name[16]; 30 + 31 + int dir; // DMA_FROM_DEVICE || DMA_TO_DEVICE 32 + struct mutex sem; 33 + unsigned int irq; 34 + struct work_struct irq_work; 35 + 36 + atomic_t open_count; 37 + 38 + size_t accumulated_bytes; 39 + u32 accumulated_flags; 40 + 41 + // Descriptor "Pool" housekeeping 42 + u32 desc_pool_cnt; 43 + struct dma_pool *desc_pool; 44 + struct kpc_dma_descriptor *desc_pool_first; 45 + struct kpc_dma_descriptor *desc_pool_last; 46 + 47 + struct kpc_dma_descriptor *desc_next; 48 + struct kpc_dma_descriptor *desc_completed; 49 + }; 50 + 51 + struct dev_private_data { 52 + struct kpc_dma_device *ldev; 53 + u64 card_addr; 54 + u64 user_ctl; 55 + u64 user_ctl_last; 56 + u64 user_sts; 57 + }; 58 + 59 + struct kpc_dma_device * kpc_dma_lookup_device(int minor); 60 + 61 + extern struct file_operations kpc_dma_fops; 62 + 63 + #define ENG_CAP_PRESENT 0x00000001 64 + #define ENG_CAP_DIRECTION 0x00000002 65 + #define ENG_CAP_TYPE_MASK 0x000000F0 66 + #define ENG_CAP_NUMBER_MASK 0x0000FF00 67 + #define ENG_CAP_CARD_ADDR_SIZE_MASK 0x007F0000 68 + #define ENG_CAP_DESC_MAX_BYTE_CNT_MASK 0x3F000000 69 + #define ENG_CAP_PERF_SCALE_MASK 0xC0000000 70 + 71 + 
#define ENG_CTL_IRQ_ENABLE BIT(0) 72 + #define ENG_CTL_IRQ_ACTIVE BIT(1) 73 + #define ENG_CTL_DESC_COMPLETE BIT(2) 74 + #define ENG_CTL_DESC_ALIGN_ERR BIT(3) 75 + #define ENG_CTL_DESC_FETCH_ERR BIT(4) 76 + #define ENG_CTL_SW_ABORT_ERR BIT(5) 77 + #define ENG_CTL_DESC_CHAIN_END BIT(7) 78 + #define ENG_CTL_DMA_ENABLE BIT(8) 79 + #define ENG_CTL_DMA_RUNNING BIT(10) 80 + #define ENG_CTL_DMA_WAITING BIT(11) 81 + #define ENG_CTL_DMA_WAITING_PERSIST BIT(12) 82 + #define ENG_CTL_DMA_RESET_REQUEST BIT(14) 83 + #define ENG_CTL_DMA_RESET BIT(15) 84 + #define ENG_CTL_DESC_FETCH_ERR_CLASS_MASK 0x700000 85 + 86 + struct aio_cb_data { 87 + struct dev_private_data *priv; 88 + struct kpc_dma_device *ldev; 89 + struct completion *cpl; 90 + unsigned char flags; 91 + struct kiocb *kcb; 92 + size_t len; 93 + 94 + unsigned int page_count; 95 + struct page **user_pages; 96 + struct sg_table sgt; 97 + int mapped_entry_count; 98 + }; 99 + 100 + #define ACD_FLAG_DONE 0 101 + #define ACD_FLAG_ABORT 1 102 + #define ACD_FLAG_ENG_ACCUM_ERROR 4 103 + #define ACD_FLAG_ENG_ACCUM_SHORT 5 104 + 105 + struct kpc_dma_descriptor { 106 + struct { 107 + volatile u32 DescByteCount :20; 108 + volatile u32 DescStatusErrorFlags :4; 109 + volatile u32 DescStatusFlags :8; 110 + }; 111 + volatile u32 DescUserControlLS; 112 + volatile u32 DescUserControlMS; 113 + volatile u32 DescCardAddrLS; 114 + struct { 115 + volatile u32 DescBufferByteCount :20; 116 + volatile u32 DescCardAddrMS :4; 117 + volatile u32 DescControlFlags :8; 118 + }; 119 + volatile u32 DescSystemAddrLS; 120 + volatile u32 DescSystemAddrMS; 121 + volatile u32 DescNextDescPtr; 122 + 123 + dma_addr_t MyDMAAddr; 124 + struct kpc_dma_descriptor *Next; 125 + 126 + struct aio_cb_data *acd; 127 + } __attribute__((packed)); 128 + // DescControlFlags: 129 + #define DMA_DESC_CTL_SOP BIT(7) 130 + #define DMA_DESC_CTL_EOP BIT(6) 131 + #define DMA_DESC_CTL_AFIFO BIT(2) 132 + #define DMA_DESC_CTL_IRQONERR BIT(1) 133 + #define DMA_DESC_CTL_IRQONDONE BIT(0) 134 
+ // DescStatusFlags: 135 + #define DMA_DESC_STS_SOP BIT(7) 136 + #define DMA_DESC_STS_EOP BIT(6) 137 + #define DMA_DESC_STS_ERROR BIT(4) 138 + #define DMA_DESC_STS_USMSZ BIT(3) 139 + #define DMA_DESC_STS_USLSZ BIT(2) 140 + #define DMA_DESC_STS_SHORT BIT(1) 141 + #define DMA_DESC_STS_COMPLETE BIT(0) 142 + // DescStatusErrorFlags: 143 + #define DMA_DESC_ESTS_ECRC BIT(2) 144 + #define DMA_DESC_ESTS_POISON BIT(1) 145 + #define DMA_DESC_ESTS_UNSUCCESSFUL BIT(0) 146 + 147 + #define DMA_DESC_ALIGNMENT 0x20 148 + 149 + static inline 150 + u32 GetEngineCapabilities(struct kpc_dma_device *eng) 151 + { 152 + return readl(eng->eng_regs + 0); 153 + } 154 + 155 + static inline 156 + void WriteEngineControl(struct kpc_dma_device *eng, u32 value) 157 + { 158 + writel(value, eng->eng_regs + 1); 159 + } 160 + static inline 161 + u32 GetEngineControl(struct kpc_dma_device *eng) 162 + { 163 + return readl(eng->eng_regs + 1); 164 + } 165 + static inline 166 + void SetClearEngineControl(struct kpc_dma_device *eng, u32 set_bits, u32 clear_bits) 167 + { 168 + u32 val = GetEngineControl(eng); 169 + val |= set_bits; 170 + val &= ~clear_bits; 171 + WriteEngineControl(eng, val); 172 + } 173 + 174 + static inline 175 + void SetEngineNextPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc) 176 + { 177 + writel(desc->MyDMAAddr, eng->eng_regs + 2); 178 + } 179 + static inline 180 + void SetEngineSWPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc) 181 + { 182 + writel(desc->MyDMAAddr, eng->eng_regs + 3); 183 + } 184 + static inline 185 + void ClearEngineCompletePtr(struct kpc_dma_device *eng) 186 + { 187 + writel(0, eng->eng_regs + 4); 188 + } 189 + static inline 190 + u32 GetEngineCompletePtr(struct kpc_dma_device *eng) 191 + { 192 + return readl(eng->eng_regs + 4); 193 + } 194 + 195 + static inline 196 + void lock_engine(struct kpc_dma_device *eng) 197 + { 198 + BUG_ON(eng == NULL); 199 + mutex_lock(&eng->sem); 200 + } 201 + 202 + static inline 203 + void 
unlock_engine(struct kpc_dma_device *eng) 204 + { 205 + BUG_ON(eng == NULL); 206 + mutex_unlock(&eng->sem); 207 + } 208 + 209 + 210 + /// Shared Functions 211 + void start_dma_engine(struct kpc_dma_device *eng); 212 + int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt); 213 + void stop_dma_engine(struct kpc_dma_device *eng); 214 + void destroy_dma_engine(struct kpc_dma_device *eng); 215 + void clear_desc(struct kpc_dma_descriptor *desc); 216 + int count_descriptors_available(struct kpc_dma_device *eng); 217 + void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags); 218 + 219 + #endif /* KPC_DMA_DRIVER_H */ 220 +
+11
drivers/staging/kpc2000/kpc_dma/uapi.h
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef KPC_DMA_DRIVER_UAPI_H_
#define KPC_DMA_DRIVER_UAPI_H_
#include <linux/ioctl.h>

/*
 * ioctl interface for the kpc_dma character devices.
 * Type 'k', nr 1-4; _IOW = userspace supplies a value, _IOR = userspace
 * reads one back.  Note nr 4 (SET_USER_CTL_LAST) is listed out of
 * numeric order.
 *
 * NOTE(review): SET_CARD_ADDR takes a __u32 while the driver stores
 * card_addr as a u64 internally (struct dev_private_data) — confirm the
 * card address space really fits in 32 bits.
 */
#define KND_IOCTL_SET_CARD_ADDR _IOW('k', 1, __u32)
#define KND_IOCTL_SET_USER_CTL _IOW('k', 2, __u64)
#define KND_IOCTL_SET_USER_CTL_LAST _IOW('k', 4, __u64)
#define KND_IOCTL_GET_USER_STS _IOR('k', 3, __u64)

#endif /* KPC_DMA_DRIVER_UAPI_H_ */