Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.29-rc2 (1722 lines, 48 kB)
1/* 2 * Intel I/OAT DMA Linux driver 3 * Copyright(c) 2004 - 2007 Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 */ 22 23/* 24 * This driver supports an Intel I/OAT DMA engine, which does asynchronous 25 * copy operations. 26 */ 27 28#include <linux/init.h> 29#include <linux/module.h> 30#include <linux/pci.h> 31#include <linux/interrupt.h> 32#include <linux/dmaengine.h> 33#include <linux/delay.h> 34#include <linux/dma-mapping.h> 35#include <linux/workqueue.h> 36#include <linux/i7300_idle.h> 37#include "ioatdma.h" 38#include "ioatdma_registers.h" 39#include "ioatdma_hw.h" 40 41#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) 42#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) 43#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 44#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) 45 46#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) 47static int ioat_pending_level = 4; 48module_param(ioat_pending_level, int, 0644); 49MODULE_PARM_DESC(ioat_pending_level, 50 "high-water mark for pushing ioat descriptors (default: 4)"); 51 52#define RESET_DELAY msecs_to_jiffies(100) 53#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) 54static void ioat_dma_chan_reset_part2(struct work_struct *work); 55static void ioat_dma_chan_watchdog(struct work_struct *work); 56 57/* 58 * workaround for IOAT ver.3.0 null descriptor issue 59 * (channel returns error when size is 0) 60 */ 61#define NULL_DESC_BUFFER_SIZE 1 62 63/* internal functions */ 64static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); 65static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); 66 67static struct ioat_desc_sw * 68ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); 69static struct ioat_desc_sw * 70ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); 71 72static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( 73 struct ioatdma_device *device, 74 int index) 75{ 76 return device->idx[index]; 77} 78 79/** 80 * ioat_dma_do_interrupt - handler used for single vector interrupt mode 81 * @irq: interrupt id 82 * @data: interrupt data 83 */ 84static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) 85{ 86 struct ioatdma_device *instance = data; 87 struct ioat_dma_chan *ioat_chan; 88 unsigned long attnstatus; 89 int bit; 90 u8 intrctrl; 91 92 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); 93 94 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) 95 return IRQ_NONE; 96 97 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { 98 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); 99 return IRQ_NONE; 100 } 101 102 attnstatus = readl(instance->reg_base + 
IOAT_ATTNSTATUS_OFFSET); 103 for_each_bit(bit, &attnstatus, BITS_PER_LONG) { 104 ioat_chan = ioat_lookup_chan_by_index(instance, bit); 105 tasklet_schedule(&ioat_chan->cleanup_task); 106 } 107 108 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); 109 return IRQ_HANDLED; 110} 111 112/** 113 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode 114 * @irq: interrupt id 115 * @data: interrupt data 116 */ 117static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) 118{ 119 struct ioat_dma_chan *ioat_chan = data; 120 121 tasklet_schedule(&ioat_chan->cleanup_task); 122 123 return IRQ_HANDLED; 124} 125 126static void ioat_dma_cleanup_tasklet(unsigned long data); 127 128/** 129 * ioat_dma_enumerate_channels - find and initialize the device's channels 130 * @device: the device to be enumerated 131 */ 132static int ioat_dma_enumerate_channels(struct ioatdma_device *device) 133{ 134 u8 xfercap_scale; 135 u32 xfercap; 136 int i; 137 struct ioat_dma_chan *ioat_chan; 138 139 /* 140 * IOAT ver.3 workarounds 141 */ 142 if (device->version == IOAT_VER_3_0) { 143 u32 chan_err_mask; 144 u16 dev_id; 145 u32 dmauncerrsts; 146 147 /* 148 * Write CHANERRMSK_INT with 3E07h to mask out the errors 149 * that can cause stability issues for IOAT ver.3 150 */ 151 chan_err_mask = 0x3E07; 152 pci_write_config_dword(device->pdev, 153 IOAT_PCI_CHANERRMASK_INT_OFFSET, 154 chan_err_mask); 155 156 /* 157 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit 158 * (workaround for spurious config parity error after restart) 159 */ 160 pci_read_config_word(device->pdev, 161 IOAT_PCI_DEVICE_ID_OFFSET, 162 &dev_id); 163 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { 164 dmauncerrsts = 0x10; 165 pci_write_config_dword(device->pdev, 166 IOAT_PCI_DMAUNCERRSTS_OFFSET, 167 dmauncerrsts); 168 } 169 } 170 171 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 172 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 173 xfercap = (xfercap_scale == 0 ? 
-1 : (1UL << xfercap_scale)); 174 175#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL 176 if (i7300_idle_platform_probe(NULL, NULL) == 0) { 177 device->common.chancnt--; 178 } 179#endif 180 for (i = 0; i < device->common.chancnt; i++) { 181 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); 182 if (!ioat_chan) { 183 device->common.chancnt = i; 184 break; 185 } 186 187 ioat_chan->device = device; 188 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); 189 ioat_chan->xfercap = xfercap; 190 ioat_chan->desccount = 0; 191 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); 192 if (ioat_chan->device->version != IOAT_VER_1_2) { 193 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE 194 | IOAT_DMA_DCA_ANY_CPU, 195 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); 196 } 197 spin_lock_init(&ioat_chan->cleanup_lock); 198 spin_lock_init(&ioat_chan->desc_lock); 199 INIT_LIST_HEAD(&ioat_chan->free_desc); 200 INIT_LIST_HEAD(&ioat_chan->used_desc); 201 /* This should be made common somewhere in dmaengine.c */ 202 ioat_chan->common.device = &device->common; 203 list_add_tail(&ioat_chan->common.device_node, 204 &device->common.channels); 205 device->idx[i] = ioat_chan; 206 tasklet_init(&ioat_chan->cleanup_task, 207 ioat_dma_cleanup_tasklet, 208 (unsigned long) ioat_chan); 209 tasklet_disable(&ioat_chan->cleanup_task); 210 } 211 return device->common.chancnt; 212} 213 214/** 215 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended 216 * descriptors to hw 217 * @chan: DMA channel handle 218 */ 219static inline void __ioat1_dma_memcpy_issue_pending( 220 struct ioat_dma_chan *ioat_chan) 221{ 222 ioat_chan->pending = 0; 223 writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); 224} 225 226static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) 227{ 228 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 229 230 if (ioat_chan->pending > 0) { 231 spin_lock_bh(&ioat_chan->desc_lock); 232 __ioat1_dma_memcpy_issue_pending(ioat_chan); 233 spin_unlock_bh(&ioat_chan->desc_lock); 234 } 235} 236 237static inline void __ioat2_dma_memcpy_issue_pending( 238 struct ioat_dma_chan *ioat_chan) 239{ 240 ioat_chan->pending = 0; 241 writew(ioat_chan->dmacount, 242 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 243} 244 245static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) 246{ 247 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 248 249 if (ioat_chan->pending > 0) { 250 spin_lock_bh(&ioat_chan->desc_lock); 251 __ioat2_dma_memcpy_issue_pending(ioat_chan); 252 spin_unlock_bh(&ioat_chan->desc_lock); 253 } 254} 255 256 257/** 258 * ioat_dma_chan_reset_part2 - reinit the channel after a reset 259 */ 260static void ioat_dma_chan_reset_part2(struct work_struct *work) 261{ 262 struct ioat_dma_chan *ioat_chan = 263 container_of(work, struct ioat_dma_chan, work.work); 264 struct ioat_desc_sw *desc; 265 266 spin_lock_bh(&ioat_chan->cleanup_lock); 267 spin_lock_bh(&ioat_chan->desc_lock); 268 269 ioat_chan->completion_virt->low = 0; 270 ioat_chan->completion_virt->high = 0; 271 ioat_chan->pending = 0; 272 273 /* 274 * count the descriptors waiting, and be sure to do it 275 * right for both the CB1 line and the CB2 ring 276 */ 277 ioat_chan->dmacount = 0; 278 if (ioat_chan->used_desc.prev) { 279 desc = to_ioat_desc(ioat_chan->used_desc.prev); 280 do { 281 ioat_chan->dmacount++; 282 desc = to_ioat_desc(desc->node.next); 283 } while (&desc->node != ioat_chan->used_desc.next); 284 } 285 286 /* 287 * write the new starting descriptor address 288 * this puts channel engine into ARMED 
state 289 */ 290 desc = to_ioat_desc(ioat_chan->used_desc.prev); 291 switch (ioat_chan->device->version) { 292 case IOAT_VER_1_2: 293 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 294 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 295 writel(((u64) desc->async_tx.phys) >> 32, 296 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 297 298 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base 299 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 300 break; 301 case IOAT_VER_2_0: 302 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 303 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 304 writel(((u64) desc->async_tx.phys) >> 32, 305 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 306 307 /* tell the engine to go with what's left to be done */ 308 writew(ioat_chan->dmacount, 309 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 310 311 break; 312 } 313 dev_err(&ioat_chan->device->pdev->dev, 314 "chan%d reset - %d descs waiting, %d total desc\n", 315 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); 316 317 spin_unlock_bh(&ioat_chan->desc_lock); 318 spin_unlock_bh(&ioat_chan->cleanup_lock); 319} 320 321/** 322 * ioat_dma_reset_channel - restart a channel 323 * @ioat_chan: IOAT DMA channel handle 324 */ 325static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) 326{ 327 u32 chansts, chanerr; 328 329 if (!ioat_chan->used_desc.prev) 330 return; 331 332 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 333 chansts = (ioat_chan->completion_virt->low 334 & IOAT_CHANSTS_DMA_TRANSFER_STATUS); 335 if (chanerr) { 336 dev_err(&ioat_chan->device->pdev->dev, 337 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", 338 chan_num(ioat_chan), chansts, chanerr); 339 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 340 } 341 342 /* 343 * whack it upside the head with a reset 344 * and wait for things to settle out. 345 * force the pending count to a really big negative 346 * to make sure no one forces an issue_pending 347 * while we're waiting. 348 */ 349 350 spin_lock_bh(&ioat_chan->desc_lock); 351 ioat_chan->pending = INT_MIN; 352 writeb(IOAT_CHANCMD_RESET, 353 ioat_chan->reg_base 354 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 355 spin_unlock_bh(&ioat_chan->desc_lock); 356 357 /* schedule the 2nd half instead of sleeping a long time */ 358 schedule_delayed_work(&ioat_chan->work, RESET_DELAY); 359} 360 361/** 362 * ioat_dma_chan_watchdog - watch for stuck channels 363 */ 364static void ioat_dma_chan_watchdog(struct work_struct *work) 365{ 366 struct ioatdma_device *device = 367 container_of(work, struct ioatdma_device, work.work); 368 struct ioat_dma_chan *ioat_chan; 369 int i; 370 371 union { 372 u64 full; 373 struct { 374 u32 low; 375 u32 high; 376 }; 377 } completion_hw; 378 unsigned long compl_desc_addr_hw; 379 380 for (i = 0; i < device->common.chancnt; i++) { 381 ioat_chan = ioat_lookup_chan_by_index(device, i); 382 383 if (ioat_chan->device->version == IOAT_VER_1_2 384 /* have we started processing anything yet */ 385 && ioat_chan->last_completion 386 /* have we completed any since last watchdog cycle? */ 387 && (ioat_chan->last_completion == 388 ioat_chan->watchdog_completion) 389 /* has TCP stuck on one cookie since last watchdog? */ 390 && (ioat_chan->watchdog_tcp_cookie == 391 ioat_chan->watchdog_last_tcp_cookie) 392 && (ioat_chan->watchdog_tcp_cookie != 393 ioat_chan->completed_cookie) 394 /* is there something in the chain to be processed? 
*/ 395 /* CB1 chain always has at least the last one processed */ 396 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) 397 && ioat_chan->pending == 0) { 398 399 /* 400 * check CHANSTS register for completed 401 * descriptor address. 402 * if it is different than completion writeback, 403 * it is not zero 404 * and it has changed since the last watchdog 405 * we can assume that channel 406 * is still working correctly 407 * and the problem is in completion writeback. 408 * update completion writeback 409 * with actual CHANSTS value 410 * else 411 * try resetting the channel 412 */ 413 414 completion_hw.low = readl(ioat_chan->reg_base + 415 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); 416 completion_hw.high = readl(ioat_chan->reg_base + 417 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); 418#if (BITS_PER_LONG == 64) 419 compl_desc_addr_hw = 420 completion_hw.full 421 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 422#else 423 compl_desc_addr_hw = 424 completion_hw.low & IOAT_LOW_COMPLETION_MASK; 425#endif 426 427 if ((compl_desc_addr_hw != 0) 428 && (compl_desc_addr_hw != ioat_chan->watchdog_completion) 429 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { 430 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; 431 ioat_chan->completion_virt->low = completion_hw.low; 432 ioat_chan->completion_virt->high = completion_hw.high; 433 } else { 434 ioat_dma_reset_channel(ioat_chan); 435 ioat_chan->watchdog_completion = 0; 436 ioat_chan->last_compl_desc_addr_hw = 0; 437 } 438 439 /* 440 * for version 2.0 if there are descriptors yet to be processed 441 * and the last completed hasn't changed since the last watchdog 442 * if they haven't hit the pending level 443 * issue the pending to push them through 444 * else 445 * try resetting the channel 446 */ 447 } else if (ioat_chan->device->version == IOAT_VER_2_0 448 && ioat_chan->used_desc.prev 449 && ioat_chan->last_completion 450 && ioat_chan->last_completion == ioat_chan->watchdog_completion) { 451 452 if (ioat_chan->pending < ioat_pending_level) 453 ioat2_dma_memcpy_issue_pending(&ioat_chan->common); 454 else { 455 ioat_dma_reset_channel(ioat_chan); 456 ioat_chan->watchdog_completion = 0; 457 } 458 } else { 459 ioat_chan->last_compl_desc_addr_hw = 0; 460 ioat_chan->watchdog_completion 461 = ioat_chan->last_completion; 462 } 463 464 ioat_chan->watchdog_last_tcp_cookie = 465 ioat_chan->watchdog_tcp_cookie; 466 } 467 468 schedule_delayed_work(&device->work, WATCHDOG_DELAY); 469} 470 471static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 472{ 473 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 474 struct ioat_desc_sw *first = tx_to_ioat_desc(tx); 475 struct ioat_desc_sw *prev, *new; 476 struct ioat_dma_descriptor *hw; 477 dma_cookie_t cookie; 478 LIST_HEAD(new_chain); 479 u32 copy; 480 size_t len; 481 dma_addr_t src, dst; 482 unsigned long orig_flags; 483 unsigned int desc_count = 0; 484 485 /* src and dest and len are stored in the initial descriptor */ 486 len = first->len; 487 src = first->src; 488 dst = first->dst; 489 orig_flags = first->async_tx.flags; 490 new = first; 491 492 spin_lock_bh(&ioat_chan->desc_lock); 493 prev = to_ioat_desc(ioat_chan->used_desc.prev); 494 prefetch(prev->hw); 495 do { 496 copy = min_t(size_t, len, ioat_chan->xfercap); 497 498 async_tx_ack(&new->async_tx); 499 500 hw = new->hw; 501 hw->size = copy; 502 hw->ctl = 0; 503 hw->src_addr = src; 504 hw->dst_addr = dst; 505 hw->next = 0; 506 507 /* chain together the physical address list for the HW */ 508 
wmb(); 509 prev->hw->next = (u64) new->async_tx.phys; 510 511 len -= copy; 512 dst += copy; 513 src += copy; 514 515 list_add_tail(&new->node, &new_chain); 516 desc_count++; 517 prev = new; 518 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); 519 520 if (!new) { 521 dev_err(&ioat_chan->device->pdev->dev, 522 "tx submit failed\n"); 523 spin_unlock_bh(&ioat_chan->desc_lock); 524 return -ENOMEM; 525 } 526 527 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 528 if (first->async_tx.callback) { 529 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 530 if (first != new) { 531 /* move callback into to last desc */ 532 new->async_tx.callback = first->async_tx.callback; 533 new->async_tx.callback_param 534 = first->async_tx.callback_param; 535 first->async_tx.callback = NULL; 536 first->async_tx.callback_param = NULL; 537 } 538 } 539 540 new->tx_cnt = desc_count; 541 new->async_tx.flags = orig_flags; /* client is in control of this ack */ 542 543 /* store the original values for use in later cleanup */ 544 if (new != first) { 545 new->src = first->src; 546 new->dst = first->dst; 547 new->len = first->len; 548 } 549 550 /* cookie incr and addition to used_list must be atomic */ 551 cookie = ioat_chan->common.cookie; 552 cookie++; 553 if (cookie < 0) 554 cookie = 1; 555 ioat_chan->common.cookie = new->async_tx.cookie = cookie; 556 557 /* write address into NextDescriptor field of last desc in chain */ 558 to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = 559 first->async_tx.phys; 560 list_splice_tail(&new_chain, &ioat_chan->used_desc); 561 562 ioat_chan->dmacount += desc_count; 563 ioat_chan->pending += desc_count; 564 if (ioat_chan->pending >= ioat_pending_level) 565 __ioat1_dma_memcpy_issue_pending(ioat_chan); 566 spin_unlock_bh(&ioat_chan->desc_lock); 567 568 return cookie; 569} 570 571static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) 572{ 573 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 574 struct ioat_desc_sw *first = tx_to_ioat_desc(tx); 575 struct ioat_desc_sw *new; 576 struct ioat_dma_descriptor *hw; 577 dma_cookie_t cookie; 578 u32 copy; 579 size_t len; 580 dma_addr_t src, dst; 581 unsigned long orig_flags; 582 unsigned int desc_count = 0; 583 584 /* src and dest and len are stored in the initial descriptor */ 585 len = first->len; 586 src = first->src; 587 dst = first->dst; 588 orig_flags = first->async_tx.flags; 589 new = first; 590 591 /* 592 * ioat_chan->desc_lock is still in force in version 2 path 593 * it gets unlocked at end of this function 594 */ 595 do { 596 copy = min_t(size_t, len, ioat_chan->xfercap); 597 598 async_tx_ack(&new->async_tx); 599 600 hw = new->hw; 601 hw->size = copy; 602 hw->ctl = 0; 603 hw->src_addr = src; 604 hw->dst_addr = dst; 605 606 len -= copy; 607 dst += copy; 608 src += copy; 609 desc_count++; 610 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); 611 612 if (!new) { 613 dev_err(&ioat_chan->device->pdev->dev, 614 "tx submit failed\n"); 615 spin_unlock_bh(&ioat_chan->desc_lock); 616 return -ENOMEM; 617 } 618 619 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 620 if (first->async_tx.callback) { 621 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 622 if (first != new) { 623 /* move callback into to last desc */ 624 new->async_tx.callback = first->async_tx.callback; 625 new->async_tx.callback_param 626 = first->async_tx.callback_param; 627 first->async_tx.callback = NULL; 628 first->async_tx.callback_param = NULL; 629 } 630 } 631 632 new->tx_cnt = desc_count; 633 new->async_tx.flags = orig_flags; /* client is in 
control of this ack */ 634 635 /* store the original values for use in later cleanup */ 636 if (new != first) { 637 new->src = first->src; 638 new->dst = first->dst; 639 new->len = first->len; 640 } 641 642 /* cookie incr and addition to used_list must be atomic */ 643 cookie = ioat_chan->common.cookie; 644 cookie++; 645 if (cookie < 0) 646 cookie = 1; 647 ioat_chan->common.cookie = new->async_tx.cookie = cookie; 648 649 ioat_chan->dmacount += desc_count; 650 ioat_chan->pending += desc_count; 651 if (ioat_chan->pending >= ioat_pending_level) 652 __ioat2_dma_memcpy_issue_pending(ioat_chan); 653 spin_unlock_bh(&ioat_chan->desc_lock); 654 655 return cookie; 656} 657 658/** 659 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair 660 * @ioat_chan: the channel supplying the memory pool for the descriptors 661 * @flags: allocation flags 662 */ 663static struct ioat_desc_sw *ioat_dma_alloc_descriptor( 664 struct ioat_dma_chan *ioat_chan, 665 gfp_t flags) 666{ 667 struct ioat_dma_descriptor *desc; 668 struct ioat_desc_sw *desc_sw; 669 struct ioatdma_device *ioatdma_device; 670 dma_addr_t phys; 671 672 ioatdma_device = to_ioatdma_device(ioat_chan->common.device); 673 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); 674 if (unlikely(!desc)) 675 return NULL; 676 677 desc_sw = kzalloc(sizeof(*desc_sw), flags); 678 if (unlikely(!desc_sw)) { 679 pci_pool_free(ioatdma_device->dma_pool, desc, phys); 680 return NULL; 681 } 682 683 memset(desc, 0, sizeof(*desc)); 684 dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); 685 switch (ioat_chan->device->version) { 686 case IOAT_VER_1_2: 687 desc_sw->async_tx.tx_submit = ioat1_tx_submit; 688 break; 689 case IOAT_VER_2_0: 690 case IOAT_VER_3_0: 691 desc_sw->async_tx.tx_submit = ioat2_tx_submit; 692 break; 693 } 694 INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); 695 696 desc_sw->hw = desc; 697 desc_sw->async_tx.phys = phys; 698 699 return desc_sw; 700} 701 702static int ioat_initial_desc_count = 256; 703module_param(ioat_initial_desc_count, int, 0644); 704MODULE_PARM_DESC(ioat_initial_desc_count, 705 "initial descriptors per channel (default: 256)"); 706 707/** 708 * ioat2_dma_massage_chan_desc - link the descriptors into a circle 709 * @ioat_chan: the channel to be massaged 710 */ 711static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) 712{ 713 struct ioat_desc_sw *desc, *_desc; 714 715 /* setup used_desc */ 716 ioat_chan->used_desc.next = ioat_chan->free_desc.next; 717 ioat_chan->used_desc.prev = NULL; 718 719 /* pull free_desc out of the circle so that every node is a hw 720 * descriptor, but leave it pointing to the list 721 */ 722 ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; 723 ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; 724 725 /* circle link the hw descriptors */ 726 desc = to_ioat_desc(ioat_chan->free_desc.next); 727 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; 728 list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { 729 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; 730 } 731} 732 733/** 734 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 735 * @chan: the channel to be filled out 736 */ 737static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) 738{ 739 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 740 struct ioat_desc_sw *desc; 741 u16 chanctrl; 742 u32 chanerr; 743 int i; 744 LIST_HEAD(tmp_list); 745 746 /* have we already been set up? 
*/ 747 if (!list_empty(&ioat_chan->free_desc)) 748 return ioat_chan->desccount; 749 750 /* Setup register to interrupt and write completion status on error */ 751 chanctrl = IOAT_CHANCTRL_ERR_INT_EN | 752 IOAT_CHANCTRL_ANY_ERR_ABORT_EN | 753 IOAT_CHANCTRL_ERR_COMPLETION_EN; 754 writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); 755 756 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 757 if (chanerr) { 758 dev_err(&ioat_chan->device->pdev->dev, 759 "CHANERR = %x, clearing\n", chanerr); 760 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 761 } 762 763 /* Allocate descriptors */ 764 for (i = 0; i < ioat_initial_desc_count; i++) { 765 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); 766 if (!desc) { 767 dev_err(&ioat_chan->device->pdev->dev, 768 "Only %d initial descriptors\n", i); 769 break; 770 } 771 list_add_tail(&desc->node, &tmp_list); 772 } 773 spin_lock_bh(&ioat_chan->desc_lock); 774 ioat_chan->desccount = i; 775 list_splice(&tmp_list, &ioat_chan->free_desc); 776 if (ioat_chan->device->version != IOAT_VER_1_2) 777 ioat2_dma_massage_chan_desc(ioat_chan); 778 spin_unlock_bh(&ioat_chan->desc_lock); 779 780 /* allocate a completion writeback area */ 781 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 782 ioat_chan->completion_virt = 783 pci_pool_alloc(ioat_chan->device->completion_pool, 784 GFP_KERNEL, 785 &ioat_chan->completion_addr); 786 memset(ioat_chan->completion_virt, 0, 787 sizeof(*ioat_chan->completion_virt)); 788 writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, 789 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); 790 writel(((u64) ioat_chan->completion_addr) >> 32, 791 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 792 793 tasklet_enable(&ioat_chan->cleanup_task); 794 ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ 795 return ioat_chan->desccount; 796} 797 798/** 799 * ioat_dma_free_chan_resources - release all the descriptors 800 * @chan: the channel to be cleaned 801 */ 802static void ioat_dma_free_chan_resources(struct dma_chan *chan) 803{ 804 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 805 struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); 806 struct ioat_desc_sw *desc, *_desc; 807 int in_use_descs = 0; 808 809 /* Before freeing channel resources first check 810 * if they have been previously allocated for this channel. 811 */ 812 if (ioat_chan->desccount == 0) 813 return; 814 815 tasklet_disable(&ioat_chan->cleanup_task); 816 ioat_dma_memcpy_cleanup(ioat_chan); 817 818 /* Delay 100ms after reset to allow internal DMA logic to quiesce 819 * before removing DMA descriptor resources. 
820 */ 821 writeb(IOAT_CHANCMD_RESET, 822 ioat_chan->reg_base 823 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 824 mdelay(100); 825 826 spin_lock_bh(&ioat_chan->desc_lock); 827 switch (ioat_chan->device->version) { 828 case IOAT_VER_1_2: 829 list_for_each_entry_safe(desc, _desc, 830 &ioat_chan->used_desc, node) { 831 in_use_descs++; 832 list_del(&desc->node); 833 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 834 desc->async_tx.phys); 835 kfree(desc); 836 } 837 list_for_each_entry_safe(desc, _desc, 838 &ioat_chan->free_desc, node) { 839 list_del(&desc->node); 840 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 841 desc->async_tx.phys); 842 kfree(desc); 843 } 844 break; 845 case IOAT_VER_2_0: 846 case IOAT_VER_3_0: 847 list_for_each_entry_safe(desc, _desc, 848 ioat_chan->free_desc.next, node) { 849 list_del(&desc->node); 850 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 851 desc->async_tx.phys); 852 kfree(desc); 853 } 854 desc = to_ioat_desc(ioat_chan->free_desc.next); 855 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 856 desc->async_tx.phys); 857 kfree(desc); 858 INIT_LIST_HEAD(&ioat_chan->free_desc); 859 INIT_LIST_HEAD(&ioat_chan->used_desc); 860 break; 861 } 862 spin_unlock_bh(&ioat_chan->desc_lock); 863 864 pci_pool_free(ioatdma_device->completion_pool, 865 ioat_chan->completion_virt, 866 ioat_chan->completion_addr); 867 868 /* one is ok since we left it on there on purpose */ 869 if (in_use_descs > 1) 870 dev_err(&ioat_chan->device->pdev->dev, 871 "Freeing %d in use descriptors!\n", 872 in_use_descs - 1); 873 874 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 875 ioat_chan->pending = 0; 876 ioat_chan->dmacount = 0; 877 ioat_chan->desccount = 0; 878 ioat_chan->watchdog_completion = 0; 879 ioat_chan->last_compl_desc_addr_hw = 0; 880 ioat_chan->watchdog_tcp_cookie = 881 ioat_chan->watchdog_last_tcp_cookie = 0; 882} 883 884/** 885 * ioat_dma_get_next_descriptor - return the next available descriptor 886 * @ioat_chan: IOAT DMA channel handle 887 * 888 * Gets the next descriptor from the chain, and must be called with the 889 * channel's desc_lock held. Allocates more descriptors if the channel 890 * has run out. 
891 */ 892static struct ioat_desc_sw * 893ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) 894{ 895 struct ioat_desc_sw *new; 896 897 if (!list_empty(&ioat_chan->free_desc)) { 898 new = to_ioat_desc(ioat_chan->free_desc.next); 899 list_del(&new->node); 900 } else { 901 /* try to get another desc */ 902 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); 903 if (!new) { 904 dev_err(&ioat_chan->device->pdev->dev, 905 "alloc failed\n"); 906 return NULL; 907 } 908 } 909 910 prefetch(new->hw); 911 return new; 912} 913 914static struct ioat_desc_sw * 915ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) 916{ 917 struct ioat_desc_sw *new; 918 919 /* 920 * used.prev points to where to start processing 921 * used.next points to next free descriptor 922 * if used.prev == NULL, there are none waiting to be processed 923 * if used.next == used.prev.prev, there is only one free descriptor, 924 * and we need to use it to as a noop descriptor before 925 * linking in a new set of descriptors, since the device 926 * has probably already read the pointer to it 927 */ 928 if (ioat_chan->used_desc.prev && 929 ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { 930 931 struct ioat_desc_sw *desc; 932 struct ioat_desc_sw *noop_desc; 933 int i; 934 935 /* set up the noop descriptor */ 936 noop_desc = to_ioat_desc(ioat_chan->used_desc.next); 937 /* set size to non-zero value (channel returns error when size is 0) */ 938 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; 939 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; 940 noop_desc->hw->src_addr = 0; 941 noop_desc->hw->dst_addr = 0; 942 943 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; 944 ioat_chan->pending++; 945 ioat_chan->dmacount++; 946 947 /* try to get a few more descriptors */ 948 for (i = 16; i; i--) { 949 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); 950 if (!desc) { 951 dev_err(&ioat_chan->device->pdev->dev, 952 "alloc failed\n"); 953 break; 954 } 955 list_add_tail(&desc->node, ioat_chan->used_desc.next); 956 957 desc->hw->next 958 = to_ioat_desc(desc->node.next)->async_tx.phys; 959 to_ioat_desc(desc->node.prev)->hw->next 960 = desc->async_tx.phys; 961 ioat_chan->desccount++; 962 } 963 964 ioat_chan->used_desc.next = noop_desc->node.next; 965 } 966 new = to_ioat_desc(ioat_chan->used_desc.next); 967 prefetch(new); 968 ioat_chan->used_desc.next = new->node.next; 969 970 if (ioat_chan->used_desc.prev == NULL) 971 ioat_chan->used_desc.prev = &new->node; 972 973 prefetch(new->hw); 974 return new; 975} 976 977static struct ioat_desc_sw *ioat_dma_get_next_descriptor( 978 struct ioat_dma_chan *ioat_chan) 979{ 980 if (!ioat_chan) 981 return NULL; 982 983 switch (ioat_chan->device->version) { 984 case IOAT_VER_1_2: 985 return ioat1_dma_get_next_descriptor(ioat_chan); 986 case IOAT_VER_2_0: 987 case IOAT_VER_3_0: 988 return ioat2_dma_get_next_descriptor(ioat_chan); 989 } 990 return NULL; 991} 992 993static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( 994 struct dma_chan *chan, 995 dma_addr_t dma_dest, 996 dma_addr_t dma_src, 997 size_t len, 998 unsigned long flags) 999{ 1000 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 1001 struct ioat_desc_sw *new; 1002 1003 spin_lock_bh(&ioat_chan->desc_lock); 1004 new = ioat_dma_get_next_descriptor(ioat_chan); 1005 spin_unlock_bh(&ioat_chan->desc_lock); 1006 1007 if (new) { 1008 new->len = len; 1009 new->dst = dma_dest; 1010 new->src = dma_src; 1011 new->async_tx.flags = flags; 1012 return &new->async_tx; 1013 } else { 1014 
dev_err(&ioat_chan->device->pdev->dev, 1015 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", 1016 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); 1017 return NULL; 1018 } 1019} 1020 1021static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( 1022 struct dma_chan *chan, 1023 dma_addr_t dma_dest, 1024 dma_addr_t dma_src, 1025 size_t len, 1026 unsigned long flags) 1027{ 1028 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 1029 struct ioat_desc_sw *new; 1030 1031 spin_lock_bh(&ioat_chan->desc_lock); 1032 new = ioat2_dma_get_next_descriptor(ioat_chan); 1033 1034 /* 1035 * leave ioat_chan->desc_lock set in ioat 2 path 1036 * it will get unlocked at end of tx_submit 1037 */ 1038 1039 if (new) { 1040 new->len = len; 1041 new->dst = dma_dest; 1042 new->src = dma_src; 1043 new->async_tx.flags = flags; 1044 return &new->async_tx; 1045 } else { 1046 spin_unlock_bh(&ioat_chan->desc_lock); 1047 dev_err(&ioat_chan->device->pdev->dev, 1048 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", 1049 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); 1050 return NULL; 1051 } 1052} 1053 1054static void ioat_dma_cleanup_tasklet(unsigned long data) 1055{ 1056 struct ioat_dma_chan *chan = (void *)data; 1057 ioat_dma_memcpy_cleanup(chan); 1058 writew(IOAT_CHANCTRL_INT_DISABLE, 1059 chan->reg_base + IOAT_CHANCTRL_OFFSET); 1060} 1061 1062static void 1063ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) 1064{ 1065 /* 1066 * yes we are unmapping both _page and _single 1067 * alloc'd regions with unmap_page. Is this 1068 * *really* that bad? 1069 */ 1070 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) 1071 pci_unmap_page(ioat_chan->device->pdev, 1072 pci_unmap_addr(desc, dst), 1073 pci_unmap_len(desc, len), 1074 PCI_DMA_FROMDEVICE); 1075 1076 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) 1077 pci_unmap_page(ioat_chan->device->pdev, 1078 pci_unmap_addr(desc, src), 1079 pci_unmap_len(desc, len), 1080 PCI_DMA_TODEVICE); 1081} 1082 1083/** 1084 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors 1085 * @chan: ioat channel to be cleaned up 1086 */ 1087static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) 1088{ 1089 unsigned long phys_complete; 1090 struct ioat_desc_sw *desc, *_desc; 1091 dma_cookie_t cookie = 0; 1092 unsigned long desc_phys; 1093 struct ioat_desc_sw *latest_desc; 1094 1095 prefetch(ioat_chan->completion_virt); 1096 1097 if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) 1098 return; 1099 1100 /* The completion writeback can happen at any time, 1101 so reads by the driver need to be atomic operations 1102 The descriptor physical addresses are limited to 32-bits 1103 when the CPU can only do a 32-bit mov */ 1104 1105#if (BITS_PER_LONG == 64) 1106 phys_complete = 1107 ioat_chan->completion_virt->full 1108 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 1109#else 1110 phys_complete = 1111 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; 1112#endif 1113 1114 if ((ioat_chan->completion_virt->full 1115 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == 1116 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { 1117 dev_err(&ioat_chan->device->pdev->dev, 1118 "Channel halted, chanerr = %x\n", 1119 readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); 1120 1121 /* TODO do something to salvage the situation */ 1122 } 1123 1124 if (phys_complete == ioat_chan->last_completion) { 1125 spin_unlock_bh(&ioat_chan->cleanup_lock); 1126 /* 1127 * perhaps we're stuck so hard that the watchdog can't go off? 
1128 * try to catch it after 2 seconds 1129 */ 1130 if (ioat_chan->device->version != IOAT_VER_3_0) { 1131 if (time_after(jiffies, 1132 ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { 1133 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); 1134 ioat_chan->last_completion_time = jiffies; 1135 } 1136 } 1137 return; 1138 } 1139 ioat_chan->last_completion_time = jiffies; 1140 1141 cookie = 0; 1142 if (!spin_trylock_bh(&ioat_chan->desc_lock)) { 1143 spin_unlock_bh(&ioat_chan->cleanup_lock); 1144 return; 1145 } 1146 1147 switch (ioat_chan->device->version) { 1148 case IOAT_VER_1_2: 1149 list_for_each_entry_safe(desc, _desc, 1150 &ioat_chan->used_desc, node) { 1151 1152 /* 1153 * Incoming DMA requests may use multiple descriptors, 1154 * due to exceeding xfercap, perhaps. If so, only the 1155 * last one will have a cookie, and require unmapping. 1156 */ 1157 if (desc->async_tx.cookie) { 1158 cookie = desc->async_tx.cookie; 1159 ioat_dma_unmap(ioat_chan, desc); 1160 if (desc->async_tx.callback) { 1161 desc->async_tx.callback(desc->async_tx.callback_param); 1162 desc->async_tx.callback = NULL; 1163 } 1164 } 1165 1166 if (desc->async_tx.phys != phys_complete) { 1167 /* 1168 * a completed entry, but not the last, so clean 1169 * up if the client is done with the descriptor 1170 */ 1171 if (async_tx_test_ack(&desc->async_tx)) { 1172 list_del(&desc->node); 1173 list_add_tail(&desc->node, 1174 &ioat_chan->free_desc); 1175 } else 1176 desc->async_tx.cookie = 0; 1177 } else { 1178 /* 1179 * last used desc. Do not remove, so we can 1180 * append from it, but don't look at it next 1181 * time, either 1182 */ 1183 desc->async_tx.cookie = 0; 1184 1185 /* TODO check status bits? */ 1186 break; 1187 } 1188 } 1189 break; 1190 case IOAT_VER_2_0: 1191 case IOAT_VER_3_0: 1192 /* has some other thread has already cleaned up? 
*/ 1193 if (ioat_chan->used_desc.prev == NULL) 1194 break; 1195 1196 /* work backwards to find latest finished desc */ 1197 desc = to_ioat_desc(ioat_chan->used_desc.next); 1198 latest_desc = NULL; 1199 do { 1200 desc = to_ioat_desc(desc->node.prev); 1201 desc_phys = (unsigned long)desc->async_tx.phys 1202 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 1203 if (desc_phys == phys_complete) { 1204 latest_desc = desc; 1205 break; 1206 } 1207 } while (&desc->node != ioat_chan->used_desc.prev); 1208 1209 if (latest_desc != NULL) { 1210 1211 /* work forwards to clear finished descriptors */ 1212 for (desc = to_ioat_desc(ioat_chan->used_desc.prev); 1213 &desc->node != latest_desc->node.next && 1214 &desc->node != ioat_chan->used_desc.next; 1215 desc = to_ioat_desc(desc->node.next)) { 1216 if (desc->async_tx.cookie) { 1217 cookie = desc->async_tx.cookie; 1218 desc->async_tx.cookie = 0; 1219 ioat_dma_unmap(ioat_chan, desc); 1220 if (desc->async_tx.callback) { 1221 desc->async_tx.callback(desc->async_tx.callback_param); 1222 desc->async_tx.callback = NULL; 1223 } 1224 } 1225 } 1226 1227 /* move used.prev up beyond those that are finished */ 1228 if (&desc->node == ioat_chan->used_desc.next) 1229 ioat_chan->used_desc.prev = NULL; 1230 else 1231 ioat_chan->used_desc.prev = &desc->node; 1232 } 1233 break; 1234 } 1235 1236 spin_unlock_bh(&ioat_chan->desc_lock); 1237 1238 ioat_chan->last_completion = phys_complete; 1239 if (cookie != 0) 1240 ioat_chan->completed_cookie = cookie; 1241 1242 spin_unlock_bh(&ioat_chan->cleanup_lock); 1243} 1244 1245/** 1246 * ioat_dma_is_complete - poll the status of a IOAT DMA transaction 1247 * @chan: IOAT DMA channel handle 1248 * @cookie: DMA transaction identifier 1249 * @done: if not %NULL, updated with last completed transaction 1250 * @used: if not %NULL, updated with last used transaction 1251 */ 1252static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, 1253 dma_cookie_t cookie, 1254 dma_cookie_t *done, 1255 dma_cookie_t *used) 1256{ 1257 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 1258 dma_cookie_t last_used; 1259 dma_cookie_t last_complete; 1260 enum dma_status ret; 1261 1262 last_used = chan->cookie; 1263 last_complete = ioat_chan->completed_cookie; 1264 ioat_chan->watchdog_tcp_cookie = cookie; 1265 1266 if (done) 1267 *done = last_complete; 1268 if (used) 1269 *used = last_used; 1270 1271 ret = dma_async_is_complete(cookie, last_complete, last_used); 1272 if (ret == DMA_SUCCESS) 1273 return ret; 1274 1275 ioat_dma_memcpy_cleanup(ioat_chan); 1276 1277 last_used = chan->cookie; 1278 last_complete = ioat_chan->completed_cookie; 1279 1280 if (done) 1281 *done = last_complete; 1282 if (used) 1283 *used = last_used; 1284 1285 return dma_async_is_complete(cookie, last_complete, last_used); 1286} 1287 1288static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) 1289{ 1290 struct ioat_desc_sw *desc; 1291 1292 spin_lock_bh(&ioat_chan->desc_lock); 1293 1294 desc = ioat_dma_get_next_descriptor(ioat_chan); 1295 1296 if (!desc) { 1297 dev_err(&ioat_chan->device->pdev->dev, 1298 "Unable to start null desc - get next desc failed\n"); 1299 spin_unlock_bh(&ioat_chan->desc_lock); 1300 return; 1301 } 1302 1303 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL 1304 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN 1305 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 1306 /* set size to non-zero value (channel returns error when size is 0) */ 1307 desc->hw->size = NULL_DESC_BUFFER_SIZE; 1308 desc->hw->src_addr = 0; 1309 desc->hw->dst_addr = 0; 1310 async_tx_ack(&desc->async_tx); 1311 switch 
(ioat_chan->device->version) { 1312 case IOAT_VER_1_2: 1313 desc->hw->next = 0; 1314 list_add_tail(&desc->node, &ioat_chan->used_desc); 1315 1316 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1317 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 1318 writel(((u64) desc->async_tx.phys) >> 32, 1319 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 1320 1321 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base 1322 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1323 break; 1324 case IOAT_VER_2_0: 1325 case IOAT_VER_3_0: 1326 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1327 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1328 writel(((u64) desc->async_tx.phys) >> 32, 1329 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 1330 1331 ioat_chan->dmacount++; 1332 __ioat2_dma_memcpy_issue_pending(ioat_chan); 1333 break; 1334 } 1335 spin_unlock_bh(&ioat_chan->desc_lock); 1336} 1337 1338/* 1339 * Perform a IOAT transaction to verify the HW works. 1340 */ 1341#define IOAT_TEST_SIZE 2000 1342 1343static void ioat_dma_test_callback(void *dma_async_param) 1344{ 1345 struct completion *cmp = dma_async_param; 1346 1347 complete(cmp); 1348} 1349 1350/** 1351 * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 1352 * @device: device to be tested 1353 */ 1354static int ioat_dma_self_test(struct ioatdma_device *device) 1355{ 1356 int i; 1357 u8 *src; 1358 u8 *dest; 1359 struct dma_chan *dma_chan; 1360 struct dma_async_tx_descriptor *tx; 1361 dma_addr_t dma_dest, dma_src; 1362 dma_cookie_t cookie; 1363 int err = 0; 1364 struct completion cmp; 1365 1366 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 1367 if (!src) 1368 return -ENOMEM; 1369 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 1370 if (!dest) { 1371 kfree(src); 1372 return -ENOMEM; 1373 } 1374 1375 /* Fill in src buffer */ 1376 for (i = 0; i < IOAT_TEST_SIZE; i++) 1377 src[i] = (u8)i; 1378 1379 /* Start copy, using first DMA channel */ 1380 dma_chan = container_of(device->common.channels.next, 1381 struct dma_chan, 1382 device_node); 1383 if (device->common.device_alloc_chan_resources(dma_chan) < 1) { 1384 dev_err(&device->pdev->dev, 1385 "selftest cannot allocate chan resource\n"); 1386 err = -ENODEV; 1387 goto out; 1388 } 1389 1390 dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, 1391 DMA_TO_DEVICE); 1392 dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, 1393 DMA_FROM_DEVICE); 1394 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 1395 IOAT_TEST_SIZE, 0); 1396 if (!tx) { 1397 dev_err(&device->pdev->dev, 1398 "Self-test prep failed, disabling\n"); 1399 err = -ENODEV; 1400 goto free_resources; 1401 } 1402 1403 async_tx_ack(tx); 1404 init_completion(&cmp); 1405 tx->callback = ioat_dma_test_callback; 1406 tx->callback_param = &cmp; 1407 cookie = tx->tx_submit(tx); 1408 if (cookie < 0) { 1409 dev_err(&device->pdev->dev, 1410 "Self-test setup failed, disabling\n"); 1411 err = -ENODEV; 1412 goto free_resources; 1413 } 1414 device->common.device_issue_pending(dma_chan); 1415 1416 wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1417 1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) 1419 != DMA_SUCCESS) { 1420 dev_err(&device->pdev->dev, 1421 "Self-test copy timed out, disabling\n"); 1422 err = -ENODEV; 1423 goto free_resources; 1424 } 1425 if (memcmp(src, dest, IOAT_TEST_SIZE)) { 1426 dev_err(&device->pdev->dev, 1427 "Self-test copy failed compare, disabling\n"); 1428 err = -ENODEV; 1429 
goto free_resources; 1430 } 1431 1432free_resources: 1433 device->common.device_free_chan_resources(dma_chan); 1434out: 1435 kfree(src); 1436 kfree(dest); 1437 return err; 1438} 1439 1440static char ioat_interrupt_style[32] = "msix"; 1441module_param_string(ioat_interrupt_style, ioat_interrupt_style, 1442 sizeof(ioat_interrupt_style), 0644); 1443MODULE_PARM_DESC(ioat_interrupt_style, 1444 "set ioat interrupt style: msix (default), " 1445 "msix-single-vector, msi, intx)"); 1446 1447/** 1448 * ioat_dma_setup_interrupts - setup interrupt handler 1449 * @device: ioat device 1450 */ 1451static int ioat_dma_setup_interrupts(struct ioatdma_device *device) 1452{ 1453 struct ioat_dma_chan *ioat_chan; 1454 int err, i, j, msixcnt; 1455 u8 intrctrl = 0; 1456 1457 if (!strcmp(ioat_interrupt_style, "msix")) 1458 goto msix; 1459 if (!strcmp(ioat_interrupt_style, "msix-single-vector")) 1460 goto msix_single_vector; 1461 if (!strcmp(ioat_interrupt_style, "msi")) 1462 goto msi; 1463 if (!strcmp(ioat_interrupt_style, "intx")) 1464 goto intx; 1465 dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", 1466 ioat_interrupt_style); 1467 goto err_no_irq; 1468 1469msix: 1470 /* The number of MSI-X vectors should equal the number of channels */ 1471 msixcnt = device->common.chancnt; 1472 for (i = 0; i < msixcnt; i++) 1473 device->msix_entries[i].entry = i; 1474 1475 err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); 1476 if (err < 0) 1477 goto msi; 1478 if (err > 0) 1479 goto msix_single_vector; 1480 1481 for (i = 0; i < msixcnt; i++) { 1482 ioat_chan = ioat_lookup_chan_by_index(device, i); 1483 err = request_irq(device->msix_entries[i].vector, 1484 ioat_dma_do_interrupt_msix, 1485 0, "ioat-msix", ioat_chan); 1486 if (err) { 1487 for (j = 0; j < i; j++) { 1488 ioat_chan = 1489 ioat_lookup_chan_by_index(device, j); 1490 free_irq(device->msix_entries[j].vector, 1491 ioat_chan); 1492 } 1493 goto msix_single_vector; 1494 } 1495 } 1496 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; 1497 device->irq_mode = msix_multi_vector; 1498 goto done; 1499 1500msix_single_vector: 1501 device->msix_entries[0].entry = 0; 1502 err = pci_enable_msix(device->pdev, device->msix_entries, 1); 1503 if (err) 1504 goto msi; 1505 1506 err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, 1507 0, "ioat-msix", device); 1508 if (err) { 1509 pci_disable_msix(device->pdev); 1510 goto msi; 1511 } 1512 device->irq_mode = msix_single_vector; 1513 goto done; 1514 1515msi: 1516 err = pci_enable_msi(device->pdev); 1517 if (err) 1518 goto intx; 1519 1520 err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, 1521 0, "ioat-msi", device); 1522 if (err) { 1523 pci_disable_msi(device->pdev); 1524 goto intx; 1525 } 1526 /* 1527 * CB 1.2 devices need a bit set in configuration space to enable MSI 1528 */ 1529 if (device->version == IOAT_VER_1_2) { 1530 u32 dmactrl; 1531 pci_read_config_dword(device->pdev, 1532 IOAT_PCI_DMACTRL_OFFSET, &dmactrl); 1533 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; 1534 pci_write_config_dword(device->pdev, 1535 IOAT_PCI_DMACTRL_OFFSET, dmactrl); 1536 } 1537 device->irq_mode = msi; 1538 goto done; 1539 1540intx: 1541 err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, 1542 IRQF_SHARED, "ioat-intx", device); 1543 if (err) 1544 goto err_no_irq; 1545 device->irq_mode = intx; 1546 1547done: 1548 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; 1549 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); 1550 return 0; 1551 1552err_no_irq: 1553 /* Disable all interrupt generation */ 1554 
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); 1555 dev_err(&device->pdev->dev, "no usable interrupts\n"); 1556 device->irq_mode = none; 1557 return -1; 1558} 1559 1560/** 1561 * ioat_dma_remove_interrupts - remove whatever interrupts were set 1562 * @device: ioat device 1563 */ 1564static void ioat_dma_remove_interrupts(struct ioatdma_device *device) 1565{ 1566 struct ioat_dma_chan *ioat_chan; 1567 int i; 1568 1569 /* Disable all interrupt generation */ 1570 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); 1571 1572 switch (device->irq_mode) { 1573 case msix_multi_vector: 1574 for (i = 0; i < device->common.chancnt; i++) { 1575 ioat_chan = ioat_lookup_chan_by_index(device, i); 1576 free_irq(device->msix_entries[i].vector, ioat_chan); 1577 } 1578 pci_disable_msix(device->pdev); 1579 break; 1580 case msix_single_vector: 1581 free_irq(device->msix_entries[0].vector, device); 1582 pci_disable_msix(device->pdev); 1583 break; 1584 case msi: 1585 free_irq(device->pdev->irq, device); 1586 pci_disable_msi(device->pdev); 1587 break; 1588 case intx: 1589 free_irq(device->pdev->irq, device); 1590 break; 1591 case none: 1592 dev_warn(&device->pdev->dev, 1593 "call to %s without interrupts setup\n", __func__); 1594 } 1595 device->irq_mode = none; 1596} 1597 1598struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 1599 void __iomem *iobase) 1600{ 1601 int err; 1602 struct ioatdma_device *device; 1603 1604 device = kzalloc(sizeof(*device), GFP_KERNEL); 1605 if (!device) { 1606 err = -ENOMEM; 1607 goto err_kzalloc; 1608 } 1609 device->pdev = pdev; 1610 device->reg_base = iobase; 1611 device->version = readb(device->reg_base + IOAT_VER_OFFSET); 1612 1613 /* DMA coherent memory pool for DMA descriptor allocations */ 1614 device->dma_pool = pci_pool_create("dma_desc_pool", pdev, 1615 sizeof(struct ioat_dma_descriptor), 1616 64, 0); 1617 if (!device->dma_pool) { 1618 err = -ENOMEM; 1619 goto err_dma_pool; 1620 } 1621 1622 device->completion_pool = pci_pool_create("completion_pool", pdev, 1623 sizeof(u64), SMP_CACHE_BYTES, 1624 SMP_CACHE_BYTES); 1625 if (!device->completion_pool) { 1626 err = -ENOMEM; 1627 goto err_completion_pool; 1628 } 1629 1630 INIT_LIST_HEAD(&device->common.channels); 1631 ioat_dma_enumerate_channels(device); 1632 1633 device->common.device_alloc_chan_resources = 1634 ioat_dma_alloc_chan_resources; 1635 device->common.device_free_chan_resources = 1636 ioat_dma_free_chan_resources; 1637 device->common.dev = &pdev->dev; 1638 1639 dma_cap_set(DMA_MEMCPY, device->common.cap_mask); 1640 device->common.device_is_tx_complete = ioat_dma_is_complete; 1641 switch (device->version) { 1642 case IOAT_VER_1_2: 1643 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; 1644 device->common.device_issue_pending = 1645 ioat1_dma_memcpy_issue_pending; 1646 break; 1647 case IOAT_VER_2_0: 1648 case IOAT_VER_3_0: 1649 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; 1650 device->common.device_issue_pending = 1651 ioat2_dma_memcpy_issue_pending; 1652 break; 1653 } 1654 1655 dev_err(&device->pdev->dev, 1656 "Intel(R) I/OAT DMA Engine found," 1657 " %d channels, device version 0x%02x, driver version %s\n", 1658 device->common.chancnt, device->version, IOAT_DMA_VERSION); 1659 1660 err = ioat_dma_setup_interrupts(device); 1661 if (err) 1662 goto err_setup_interrupts; 1663 1664 err = ioat_dma_self_test(device); 1665 if (err) 1666 goto err_self_test; 1667 1668 ioat_set_tcp_copy_break(device); 1669 1670 dma_async_device_register(&device->common); 1671 1672 if (device->version != 
IOAT_VER_3_0) { 1673 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); 1674 schedule_delayed_work(&device->work, 1675 WATCHDOG_DELAY); 1676 } 1677 1678 return device; 1679 1680err_self_test: 1681 ioat_dma_remove_interrupts(device); 1682err_setup_interrupts: 1683 pci_pool_destroy(device->completion_pool); 1684err_completion_pool: 1685 pci_pool_destroy(device->dma_pool); 1686err_dma_pool: 1687 kfree(device); 1688err_kzalloc: 1689 dev_err(&pdev->dev, 1690 "Intel(R) I/OAT DMA Engine initialization failed\n"); 1691 return NULL; 1692} 1693 1694void ioat_dma_remove(struct ioatdma_device *device) 1695{ 1696 struct dma_chan *chan, *_chan; 1697 struct ioat_dma_chan *ioat_chan; 1698 1699 ioat_dma_remove_interrupts(device); 1700 1701 dma_async_device_unregister(&device->common); 1702 1703 pci_pool_destroy(device->dma_pool); 1704 pci_pool_destroy(device->completion_pool); 1705 1706 iounmap(device->reg_base); 1707 pci_release_regions(device->pdev); 1708 pci_disable_device(device->pdev); 1709 1710 if (device->version != IOAT_VER_3_0) { 1711 cancel_delayed_work(&device->work); 1712 } 1713 1714 list_for_each_entry_safe(chan, _chan, 1715 &device->common.channels, device_node) { 1716 ioat_chan = to_ioat_chan(chan); 1717 list_del(&chan->device_node); 1718 kfree(ioat_chan); 1719 } 1720 kfree(device); 1721} 1722
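
The listing ends at line 1722 above. As an aside that is not part of the file, the sketch below shows roughly how a 2.6.29-era in-kernel dmaengine client could exercise the DMA_MEMCPY capability this driver registers; it mirrors the flow of ioat_dma_self_test() but goes through the public client API (dmaengine_get, dma_find_channel, dma_async_memcpy_buf_to_buf, dma_async_issue_pending, dma_async_is_tx_complete) rather than calling the device operations directly. The function name example_offload_copy and its error handling are illustrative assumptions.

/*
 * Illustrative sketch only: offload a buffer-to-buffer copy to any
 * channel advertising DMA_MEMCPY (e.g. an ioat channel), then poll
 * for completion. Assumes the 2.6.29-era dmaengine client API.
 */
#include <linux/dmaengine.h>
#include <linux/sched.h>

static int example_offload_copy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int err = 0;

	dmaengine_get();			/* register as a dmaengine client */
	chan = dma_find_channel(DMA_MEMCPY);	/* any channel offering memcpy offload */
	if (!chan) {
		err = -ENODEV;
		goto out;
	}

	/* maps both buffers, builds a descriptor chain and submits it */
	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0) {
		err = (int)cookie;
		goto out;
	}

	/* push appended descriptors to hardware
	 * (ends up in ioat1/ioat2_dma_memcpy_issue_pending above) */
	dma_async_issue_pending(chan);

	/* poll for completion (ends up in ioat_dma_is_complete above) */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();
out:
	dmaengine_put();
	return err;
}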