Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/ioat/dma_v2.c at v3.9-rc3 (921 lines, 25 kB)
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}
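
/*
 * Note on submission: prep routines advance ->head under prep_lock, and
 * __ioat2_issue_pending() publishes the new total to the hardware
 * DMACOUNT register, recording it in ->issued and ->dmacount.
 */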

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}
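
/*
 * __cleanup - reclaim finished descriptors
 *
 * Walks the ring from ->tail, completing cookies and running callbacks
 * until the descriptor whose physical address matches the completion
 * writeback (phys_complete) has been seen.  Called with cleanup_lock held.
 */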

static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
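
/*
 * ioat2_timer_event - per-channel watchdog
 *
 * Checks for a halted channel (fatal programming errors BUG), reclaims
 * completed descriptors, escalates to a channel restart when no progress
 * has been made since the last acknowledged completion, and otherwise
 * lets check_active() shrink an idle, oversized ring.
 */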

void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat2_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}
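
/*
 * ioat2_tx_submit_unlock - dmaengine ->tx_submit hook
 *
 * Assigns the cookie, makes the descriptor writes visible with wmb(),
 * advances ->head by the ->produce count reserved in
 * ioat2_check_space_lock(), and finally drops the prep_lock that the
 * prep routine left held.
 */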

static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		set_bit(IOAT_RUN, &chan->state);
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}
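
/*
 * Ring sizes are powers of two (1 << alloc_order), so positions such as
 * ioat->tail + i below are reduced modulo the ring size by masking with
 * (size - 1); e.g. with order 8 (256 entries) index 260 maps to slot 4.
 */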

bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}
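
/*
 * Locking note: on success ioat2_check_space_lock() returns with
 * prep_lock held; ioat2_dma_prep_memcpy_lock() fills descriptors under
 * that lock and it is only dropped in ioat2_tx_submit_unlock(), which
 * keeps submission in ring order.
 */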

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);
	clear_bit(IOAT_RUN, &chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}