Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.3-rc7, 441 lines, 13 kB

/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "4.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
        IOAT_NOIRQ = 0,
        IOAT_MSIX,
        IOAT_MSI,
        IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
        struct pci_dev *pdev;
        void __iomem *reg_base;
        struct pci_pool *dma_pool;
        struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
        struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
        struct dma_device dma_dev;
        u8 version;
        struct msix_entry msix_entries[4];
        struct ioatdma_chan *idx[4];
        struct dca_provider *dca;
        enum ioat_irq_mode irq_mode;
        u32 cap;
};

struct ioatdma_chan {
        struct dma_chan dma_chan;
        void __iomem *reg_base;
        dma_addr_t last_completion;
        spinlock_t cleanup_lock;
        unsigned long state;
        #define IOAT_COMPLETION_ACK 1
        #define IOAT_RESET_PENDING 2
        #define IOAT_KOBJ_INIT_FAIL 3
        #define IOAT_RESHAPE_PENDING 4
        #define IOAT_RUN 5
        #define IOAT_CHAN_ACTIVE 6
        struct timer_list timer;
        #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
        #define IDLE_TIMEOUT msecs_to_jiffies(2000)
        #define RESET_DELAY msecs_to_jiffies(100)
        struct ioatdma_device *ioat_dma;
        dma_addr_t completion_dma;
        u64 *completion;
        struct tasklet_struct cleanup_task;
        struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
        size_t xfercap_log;
        u16 head;
        u16 issued;
        u16 tail;
        u16 dmacount;
        u16 alloc_order;
        u16 produce;
        struct ioat_ring_ent **ring;
        spinlock_t prep_lock;
};
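
/*
 * Illustrative note (not part of the original header): the ring indices
 * above only ever increase and are masked down to the ring size when a
 * slot is dereferenced, so occupancy falls out of CIRC_CNT() from
 * <linux/circ_buf.h>. For example, with alloc_order = 4 (a 16-entry
 * ring), head = 21, issued = 19 and tail = 17: four descriptors are
 * outstanding (head - tail), two of those have already been handed to
 * the hardware (issued - tail), and two still await a doorbell write
 * (head - issued).
 */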

struct ioat_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: point to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
        struct ioat_sed_raw_descriptor *hw;
        dma_addr_t dma;
        struct ioat_ring_ent *parent;
        unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
        union {
                struct ioat_dma_descriptor *hw;
                struct ioat_xor_descriptor *xor;
                struct ioat_xor_ext_descriptor *xor_ex;
                struct ioat_pq_descriptor *pq;
                struct ioat_pq_ext_descriptor *pq_ex;
                struct ioat_pq_update_descriptor *pqu;
                struct ioat_raw_descriptor *raw;
        };
        size_t len;
        struct dma_async_tx_descriptor txd;
        enum sum_check_flags *result;
        #ifdef DEBUG
        int id;
        #endif
        struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
        return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
                struct dma_async_tx_descriptor *tx, int id)
{
        struct device *dev = to_dev(ioat_chan);

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
                " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
                (unsigned long long) tx->phys,
                (unsigned long long) hw->next, tx->cookie, tx->flags,
                hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
        ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
        return ioat_dma->idx[index];
}

static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
        u8 ver = ioat_chan->ioat_dma->version;
        u64 status;
        u32 status_lo;

        /* We need to read the low address first as this causes the
         * chipset to latch the upper bits for the subsequent read
         */
        status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
        status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
        status <<= 32;
        status |= status_lo;

        return status;
}

#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
        u8 ver = ioat_chan->ioat_dma->version;
        u64 status;

        /* With IOAT v3.3 the status register is 64bit. */
        if (ver >= IOAT_VER_3_3)
                status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
        else
                status = ioat_chansts_32(ioat_chan);

        return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif
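
/*
 * Illustrative sketch (not part of the original header) of the split
 * read performed by ioat_chansts_32() above: the low half must be read
 * first because that access latches the upper bits for the high read.
 * example_read_split_u64() is a hypothetical name.
 */
static inline u64 example_read_split_u64(void __iomem *lo, void __iomem *hi)
{
        u64 status = readl(lo);         /* latches the upper 32 bits */

        return ((u64)readl(hi) << 32) | status;
}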

static inline u64 ioat_chansts_to_addr(u64 status)
{
        return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
        return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
        u8 ver = ioat_chan->ioat_dma->version;

        writeb(IOAT_CHANCMD_SUSPEND,
               ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
        u8 ver = ioat_chan->ioat_dma->version;

        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
        u8 ver = ioat_chan->ioat_dma->version;
        u8 cmd;

        cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
        return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
        return !!err;
}

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
        (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
        (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
        return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
        return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
                        ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
        return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
                        ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
        return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
        u16 num_descs = len >> ioat_chan->xfercap_log;

        num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
        return num_descs;
}
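
/*
 * Worked example (illustrative, not part of the original header) for
 * ioat_xferlen_to_descs(): with xfercap_log = 20 (1 MiB max transfer
 * per descriptor), len = 0x280000 (2.5 MiB) yields 0x280000 >> 20 = 2,
 * plus 1 for the nonzero 0x80000 remainder, i.e. 3 descriptors.
 */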

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
        return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
        writel(addr & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
        writel(addr >> 32,
               ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
                          dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
              unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                  unsigned int src_cnt, size_t len,
                  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
             unsigned int src_cnt, const unsigned char *scf, size_t len,
             unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                 unsigned int src_cnt, const unsigned char *scf, size_t len,
                 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
                    unsigned int src_cnt, size_t len,
                    enum sum_check_flags *result, unsigned long flags);

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
               struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
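
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * how a client holding an IOAT channel and already DMA-mapped buffers
 * would drive the prep/submit/issue flow declared above.
 * example_issue_memcpy() is a hypothetical name.
 */
static inline dma_cookie_t example_issue_memcpy(struct dma_chan *c,
                                                dma_addr_t dst,
                                                dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = ioat_dma_prep_memcpy_lock(c, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);  /* queue behind other pending work */
        ioat_issue_pending(c);          /* ring the hardware doorbell */
        return cookie;
}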

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */
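
A minimal usage sketch, assuming a caller inside the driver (this helper is
hypothetical and not part of the file above): read the channel status once
with ioat_chansts(), then classify the snapshot with the is_ioat_*
predicates.

static inline bool example_channel_quiesced(struct ioatdma_chan *ioat_chan)
{
        u64 status = ioat_chansts(ioat_chan);

        return is_ioat_idle(status) || is_ioat_suspended(status);
}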