Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.14-rc2 (1673 lines, 47 kB)
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

extern struct kmem_cache *ioat3_sed_cache;

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };

static void ioat3_eh(struct ioat2_dma_chan *ioat);

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}

}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}

}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}

static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}

}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}

static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(ioat3_sed_cache, sed);
		return NULL;
	}

	return sed;
}

static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat3_sed_cache, sed);
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's an error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);

		/* set err stat if we are using dwbes */
		if (device->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat3_free_sed(device, desc->sed);
			desc->sed = NULL;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}

static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	spin_lock_bh(&chan->cleanup_lock);

	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}

}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}


	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case)
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat, compl_desc);
	}


	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		       const dma_addr_t *dst, const dma_addr_t *src,
		       unsigned int src_cnt, const unsigned char *scf,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(chan), "%s\n", __func__);

	num_descs = ioat2_xferlen_to_descs(ioat, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write hw bug.
	 */
	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
					       2, single_source_coef, len,
					       flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					     single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					       scf, len, flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					     scf, len, flags);
	}
}

struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				     flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				     flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				       scf, len, flags) :
		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				     scf, len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

static int ioat3_irq_reinit(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	int irq = pdev->irq, i;

	if (!is_bwd_ioat(pdev))
		return 0;

	switch (device->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < device->common.chancnt; i++) {
			struct msix_entry *msix = &device->msix_entries[i];
			struct ioat_chan_common *chan;

			chan = ioat_chan_by_index(device, i);
			devm_free_irq(&pdev->dev, msix->vector, chan);
		}

		pci_disable_msix(pdev);
		break;
	case IOAT_MSI:
		pci_disable_msi(pdev);
		/* fall through */
	case IOAT_INTX:
		devm_free_irq(&pdev->dev, irq, device);
		break;
	default:
		return 0;
	}
	device->irq_mode = IOAT_NOIRQ;

	return ioat_dma_setup_interrupts(device);
}

static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	if (device->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
	if (!err)
		err = ioat3_irq_reinit(device);

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}

static void ioat3_intr_quirk(struct ioatdma_device *device)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	u32 errmask;

	dma = &device->common;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (device->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			chan = to_chan_common(c);
			errmask = readl(chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	device->intr_quirk = ioat3_intr_quirk;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (device->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}

	if (device->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat3_prep_pq;
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (device->cap & IOAT_CAP_RAID16SS) {
			dma_set_maxpq(dma, 16, 0);
		} else {
			dma_set_maxpq(dma, 8, 0);
		}

		if (!(device->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat3_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (device->cap & IOAT_CAP_RAID16SS) {
				dma->max_xor = 16;
			} else {
				dma->max_xor = 8;
			}
		}
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

	/* starting with CB3.3 super extended descriptors are supported */
	if (device->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!device->sed_hw_pool[i])
				return -ENOMEM;

		}
	}

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}