Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.30-rc6 1425 lines 40 kB view raw
1/* 2 * offload engine driver for the Intel Xscale series of i/o processors 3 * Copyright © 2006, Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 */ 19 20/* 21 * This driver supports the asynchrounous DMA copy and RAID engines available 22 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) 23 */ 24 25#include <linux/init.h> 26#include <linux/module.h> 27#include <linux/delay.h> 28#include <linux/dma-mapping.h> 29#include <linux/spinlock.h> 30#include <linux/interrupt.h> 31#include <linux/platform_device.h> 32#include <linux/memory.h> 33#include <linux/ioport.h> 34 35#include <mach/adma.h> 36 37#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) 38#define to_iop_adma_device(dev) \ 39 container_of(dev, struct iop_adma_device, common) 40#define tx_to_iop_adma_slot(tx) \ 41 container_of(tx, struct iop_adma_desc_slot, async_tx) 42 43/** 44 * iop_adma_free_slots - flags descriptor slots for reuse 45 * @slot: Slot to free 46 * Caller must hold &iop_chan->lock while calling this function 47 */ 48static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) 49{ 50 int stride = slot->slots_per_op; 51 52 while (stride--) { 53 slot->slots_per_op = 0; 54 slot = list_entry(slot->slot_node.next, 55 struct iop_adma_desc_slot, 56 slot_node); 57 } 58} 59 60static dma_cookie_t 61iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, 62 struct iop_adma_chan *iop_chan, dma_cookie_t cookie) 63{ 64 BUG_ON(desc->async_tx.cookie < 0); 65 if (desc->async_tx.cookie > 0) { 66 cookie = desc->async_tx.cookie; 67 desc->async_tx.cookie = 0; 68 69 /* call the callback (must not sleep or submit new 70 * operations to this channel) 71 */ 72 if (desc->async_tx.callback) 73 desc->async_tx.callback( 74 desc->async_tx.callback_param); 75 76 /* unmap dma addresses 77 * (unmap_single vs unmap_page?) 78 */ 79 if (desc->group_head && desc->unmap_len) { 80 struct iop_adma_desc_slot *unmap = desc->group_head; 81 struct device *dev = 82 &iop_chan->device->pdev->dev; 83 u32 len = unmap->unmap_len; 84 enum dma_ctrl_flags flags = desc->async_tx.flags; 85 u32 src_cnt; 86 dma_addr_t addr; 87 dma_addr_t dest; 88 89 src_cnt = unmap->unmap_src_cnt; 90 dest = iop_desc_get_dest_addr(unmap, iop_chan); 91 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 92 enum dma_data_direction dir; 93 94 if (src_cnt > 1) /* is xor? */ 95 dir = DMA_BIDIRECTIONAL; 96 else 97 dir = DMA_FROM_DEVICE; 98 99 dma_unmap_page(dev, dest, len, dir); 100 } 101 102 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 103 while (src_cnt--) { 104 addr = iop_desc_get_src_addr(unmap, 105 iop_chan, 106 src_cnt); 107 if (addr == dest) 108 continue; 109 dma_unmap_page(dev, addr, len, 110 DMA_TO_DEVICE); 111 } 112 } 113 desc->group_head = NULL; 114 } 115 } 116 117 /* run dependent operations */ 118 dma_run_dependencies(&desc->async_tx); 119 120 return cookie; 121} 122 123static int 124iop_adma_clean_slot(struct iop_adma_desc_slot *desc, 125 struct iop_adma_chan *iop_chan) 126{ 127 /* the client is allowed to attach dependent operations 128 * until 'ack' is set 129 */ 130 if (!async_tx_test_ack(&desc->async_tx)) 131 return 0; 132 133 /* leave the last descriptor in the chain 134 * so we can append to it 135 */ 136 if (desc->chain_node.next == &iop_chan->chain) 137 return 1; 138 139 dev_dbg(iop_chan->device->common.dev, 140 "\tfree slot: %d slots_per_op: %d\n", 141 desc->idx, desc->slots_per_op); 142 143 list_del(&desc->chain_node); 144 iop_adma_free_slots(desc); 145 146 return 0; 147} 148 149static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) 150{ 151 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; 152 dma_cookie_t cookie = 0; 153 u32 current_desc = iop_chan_get_current_descriptor(iop_chan); 154 int busy = iop_chan_is_busy(iop_chan); 155 int seen_current = 0, slot_cnt = 0, slots_per_op = 0; 156 157 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 158 /* free completed slots from the chain starting with 159 * the oldest descriptor 160 */ 161 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, 162 chain_node) { 163 pr_debug("\tcookie: %d slot: %d busy: %d " 164 "this_desc: %#x next_desc: %#x ack: %d\n", 165 iter->async_tx.cookie, iter->idx, busy, 166 iter->async_tx.phys, iop_desc_get_next_desc(iter), 167 async_tx_test_ack(&iter->async_tx)); 168 prefetch(_iter); 169 prefetch(&_iter->async_tx); 170 171 /* do not advance past the current descriptor loaded into the 172 * hardware channel, subsequent descriptors are either in 173 * process or have not been submitted 174 */ 175 if (seen_current) 176 break; 177 178 /* stop the search if we reach the current descriptor and the 179 * channel is busy, or if it appears that the current descriptor 180 * needs to be re-read (i.e. has been appended to) 181 */ 182 if (iter->async_tx.phys == current_desc) { 183 BUG_ON(seen_current++); 184 if (busy || iop_desc_get_next_desc(iter)) 185 break; 186 } 187 188 /* detect the start of a group transaction */ 189 if (!slot_cnt && !slots_per_op) { 190 slot_cnt = iter->slot_cnt; 191 slots_per_op = iter->slots_per_op; 192 if (slot_cnt <= slots_per_op) { 193 slot_cnt = 0; 194 slots_per_op = 0; 195 } 196 } 197 198 if (slot_cnt) { 199 pr_debug("\tgroup++\n"); 200 if (!grp_start) 201 grp_start = iter; 202 slot_cnt -= slots_per_op; 203 } 204 205 /* all the members of a group are complete */ 206 if (slots_per_op != 0 && slot_cnt == 0) { 207 struct iop_adma_desc_slot *grp_iter, *_grp_iter; 208 int end_of_chain = 0; 209 pr_debug("\tgroup end\n"); 210 211 /* collect the total results */ 212 if (grp_start->xor_check_result) { 213 u32 zero_sum_result = 0; 214 slot_cnt = grp_start->slot_cnt; 215 grp_iter = grp_start; 216 217 list_for_each_entry_from(grp_iter, 218 &iop_chan->chain, chain_node) { 219 zero_sum_result |= 220 iop_desc_get_zero_result(grp_iter); 221 pr_debug("\titer%d result: %d\n", 222 grp_iter->idx, zero_sum_result); 223 slot_cnt -= slots_per_op; 224 if (slot_cnt == 0) 225 break; 226 } 227 pr_debug("\tgrp_start->xor_check_result: %p\n", 228 grp_start->xor_check_result); 229 *grp_start->xor_check_result = zero_sum_result; 230 } 231 232 /* clean up the group */ 233 slot_cnt = grp_start->slot_cnt; 234 grp_iter = grp_start; 235 list_for_each_entry_safe_from(grp_iter, _grp_iter, 236 &iop_chan->chain, chain_node) { 237 cookie = iop_adma_run_tx_complete_actions( 238 grp_iter, iop_chan, cookie); 239 240 slot_cnt -= slots_per_op; 241 end_of_chain = iop_adma_clean_slot(grp_iter, 242 iop_chan); 243 244 if (slot_cnt == 0 || end_of_chain) 245 break; 246 } 247 248 /* the group should be complete at this point */ 249 BUG_ON(slot_cnt); 250 251 slots_per_op = 0; 252 grp_start = NULL; 253 if (end_of_chain) 254 break; 255 else 256 continue; 257 } else if (slots_per_op) /* wait for group completion */ 258 continue; 259 260 /* write back zero sum results (single descriptor case) */ 261 if (iter->xor_check_result && iter->async_tx.cookie) 262 *iter->xor_check_result = 263 iop_desc_get_zero_result(iter); 264 265 cookie = iop_adma_run_tx_complete_actions( 266 iter, iop_chan, cookie); 267 268 if (iop_adma_clean_slot(iter, iop_chan)) 269 break; 270 } 271 272 if (cookie > 0) { 273 iop_chan->completed_cookie = cookie; 274 pr_debug("\tcompleted cookie %d\n", cookie); 275 } 276} 277 278static void 279iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) 280{ 281 spin_lock_bh(&iop_chan->lock); 282 __iop_adma_slot_cleanup(iop_chan); 283 spin_unlock_bh(&iop_chan->lock); 284} 285 286static void iop_adma_tasklet(unsigned long data) 287{ 288 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; 289 290 spin_lock(&iop_chan->lock); 291 __iop_adma_slot_cleanup(iop_chan); 292 spin_unlock(&iop_chan->lock); 293} 294 295static struct iop_adma_desc_slot * 296iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, 297 int slots_per_op) 298{ 299 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; 300 LIST_HEAD(chain); 301 int slots_found, retry = 0; 302 303 /* start search from the last allocated descrtiptor 304 * if a contiguous allocation can not be found start searching 305 * from the beginning of the list 306 */ 307retry: 308 slots_found = 0; 309 if (retry == 0) 310 iter = iop_chan->last_used; 311 else 312 iter = list_entry(&iop_chan->all_slots, 313 struct iop_adma_desc_slot, 314 slot_node); 315 316 list_for_each_entry_safe_continue( 317 iter, _iter, &iop_chan->all_slots, slot_node) { 318 prefetch(_iter); 319 prefetch(&_iter->async_tx); 320 if (iter->slots_per_op) { 321 /* give up after finding the first busy slot 322 * on the second pass through the list 323 */ 324 if (retry) 325 break; 326 327 slots_found = 0; 328 continue; 329 } 330 331 /* start the allocation if the slot is correctly aligned */ 332 if (!slots_found++) { 333 if (iop_desc_is_aligned(iter, slots_per_op)) 334 alloc_start = iter; 335 else { 336 slots_found = 0; 337 continue; 338 } 339 } 340 341 if (slots_found == num_slots) { 342 struct iop_adma_desc_slot *alloc_tail = NULL; 343 struct iop_adma_desc_slot *last_used = NULL; 344 iter = alloc_start; 345 while (num_slots) { 346 int i; 347 dev_dbg(iop_chan->device->common.dev, 348 "allocated slot: %d " 349 "(desc %p phys: %#x) slots_per_op %d\n", 350 iter->idx, iter->hw_desc, 351 iter->async_tx.phys, slots_per_op); 352 353 /* pre-ack all but the last descriptor */ 354 if (num_slots != slots_per_op) 355 async_tx_ack(&iter->async_tx); 356 357 list_add_tail(&iter->chain_node, &chain); 358 alloc_tail = iter; 359 iter->async_tx.cookie = 0; 360 iter->slot_cnt = num_slots; 361 iter->xor_check_result = NULL; 362 for (i = 0; i < slots_per_op; i++) { 363 iter->slots_per_op = slots_per_op - i; 364 last_used = iter; 365 iter = list_entry(iter->slot_node.next, 366 struct iop_adma_desc_slot, 367 slot_node); 368 } 369 num_slots -= slots_per_op; 370 } 371 alloc_tail->group_head = alloc_start; 372 alloc_tail->async_tx.cookie = -EBUSY; 373 list_splice(&chain, &alloc_tail->async_tx.tx_list); 374 iop_chan->last_used = last_used; 375 iop_desc_clear_next_desc(alloc_start); 376 iop_desc_clear_next_desc(alloc_tail); 377 return alloc_tail; 378 } 379 } 380 if (!retry++) 381 goto retry; 382 383 /* perform direct reclaim if the allocation fails */ 384 __iop_adma_slot_cleanup(iop_chan); 385 386 return NULL; 387} 388 389static dma_cookie_t 390iop_desc_assign_cookie(struct iop_adma_chan *iop_chan, 391 struct iop_adma_desc_slot *desc) 392{ 393 dma_cookie_t cookie = iop_chan->common.cookie; 394 cookie++; 395 if (cookie < 0) 396 cookie = 1; 397 iop_chan->common.cookie = desc->async_tx.cookie = cookie; 398 return cookie; 399} 400 401static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) 402{ 403 dev_dbg(iop_chan->device->common.dev, "pending: %d\n", 404 iop_chan->pending); 405 406 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) { 407 iop_chan->pending = 0; 408 iop_chan_append(iop_chan); 409 } 410} 411 412static dma_cookie_t 413iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) 414{ 415 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); 416 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); 417 struct iop_adma_desc_slot *grp_start, *old_chain_tail; 418 int slot_cnt; 419 int slots_per_op; 420 dma_cookie_t cookie; 421 dma_addr_t next_dma; 422 423 grp_start = sw_desc->group_head; 424 slot_cnt = grp_start->slot_cnt; 425 slots_per_op = grp_start->slots_per_op; 426 427 spin_lock_bh(&iop_chan->lock); 428 cookie = iop_desc_assign_cookie(iop_chan, sw_desc); 429 430 old_chain_tail = list_entry(iop_chan->chain.prev, 431 struct iop_adma_desc_slot, chain_node); 432 list_splice_init(&sw_desc->async_tx.tx_list, 433 &old_chain_tail->chain_node); 434 435 /* fix up the hardware chain */ 436 next_dma = grp_start->async_tx.phys; 437 iop_desc_set_next_desc(old_chain_tail, next_dma); 438 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */ 439 440 /* check for pre-chained descriptors */ 441 iop_paranoia(iop_desc_get_next_desc(sw_desc)); 442 443 /* increment the pending count by the number of slots 444 * memcpy operations have a 1:1 (slot:operation) relation 445 * other operations are heavier and will pop the threshold 446 * more often. 447 */ 448 iop_chan->pending += slot_cnt; 449 iop_adma_check_threshold(iop_chan); 450 spin_unlock_bh(&iop_chan->lock); 451 452 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", 453 __func__, sw_desc->async_tx.cookie, sw_desc->idx); 454 455 return cookie; 456} 457 458static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 459static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 460 461/** 462 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors 463 * @chan - allocate descriptor resources for this channel 464 * @client - current client requesting the channel be ready for requests 465 * 466 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To 467 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be 468 * greater than 2x the number slots needed to satisfy a device->max_xor 469 * request. 470 * */ 471static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 472{ 473 char *hw_desc; 474 int idx; 475 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 476 struct iop_adma_desc_slot *slot = NULL; 477 int init = iop_chan->slots_allocated ? 0 : 1; 478 struct iop_adma_platform_data *plat_data = 479 iop_chan->device->pdev->dev.platform_data; 480 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; 481 482 /* Allocate descriptor slots */ 483 do { 484 idx = iop_chan->slots_allocated; 485 if (idx == num_descs_in_pool) 486 break; 487 488 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 489 if (!slot) { 490 printk(KERN_INFO "IOP ADMA Channel only initialized" 491 " %d descriptor slots", idx); 492 break; 493 } 494 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt; 495 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; 496 497 dma_async_tx_descriptor_init(&slot->async_tx, chan); 498 slot->async_tx.tx_submit = iop_adma_tx_submit; 499 INIT_LIST_HEAD(&slot->chain_node); 500 INIT_LIST_HEAD(&slot->slot_node); 501 hw_desc = (char *) iop_chan->device->dma_desc_pool; 502 slot->async_tx.phys = 503 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; 504 slot->idx = idx; 505 506 spin_lock_bh(&iop_chan->lock); 507 iop_chan->slots_allocated++; 508 list_add_tail(&slot->slot_node, &iop_chan->all_slots); 509 spin_unlock_bh(&iop_chan->lock); 510 } while (iop_chan->slots_allocated < num_descs_in_pool); 511 512 if (idx && !iop_chan->last_used) 513 iop_chan->last_used = list_entry(iop_chan->all_slots.next, 514 struct iop_adma_desc_slot, 515 slot_node); 516 517 dev_dbg(iop_chan->device->common.dev, 518 "allocated %d descriptor slots last_used: %p\n", 519 iop_chan->slots_allocated, iop_chan->last_used); 520 521 /* initialize the channel and the chain with a null operation */ 522 if (init) { 523 if (dma_has_cap(DMA_MEMCPY, 524 iop_chan->device->common.cap_mask)) 525 iop_chan_start_null_memcpy(iop_chan); 526 else if (dma_has_cap(DMA_XOR, 527 iop_chan->device->common.cap_mask)) 528 iop_chan_start_null_xor(iop_chan); 529 else 530 BUG(); 531 } 532 533 return (idx > 0) ? idx : -ENOMEM; 534} 535 536static struct dma_async_tx_descriptor * 537iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) 538{ 539 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 540 struct iop_adma_desc_slot *sw_desc, *grp_start; 541 int slot_cnt, slots_per_op; 542 543 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 544 545 spin_lock_bh(&iop_chan->lock); 546 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); 547 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 548 if (sw_desc) { 549 grp_start = sw_desc->group_head; 550 iop_desc_init_interrupt(grp_start, iop_chan); 551 grp_start->unmap_len = 0; 552 sw_desc->async_tx.flags = flags; 553 } 554 spin_unlock_bh(&iop_chan->lock); 555 556 return sw_desc ? &sw_desc->async_tx : NULL; 557} 558 559static struct dma_async_tx_descriptor * 560iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, 561 dma_addr_t dma_src, size_t len, unsigned long flags) 562{ 563 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 564 struct iop_adma_desc_slot *sw_desc, *grp_start; 565 int slot_cnt, slots_per_op; 566 567 if (unlikely(!len)) 568 return NULL; 569 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 570 571 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 572 __func__, len); 573 574 spin_lock_bh(&iop_chan->lock); 575 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); 576 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 577 if (sw_desc) { 578 grp_start = sw_desc->group_head; 579 iop_desc_init_memcpy(grp_start, flags); 580 iop_desc_set_byte_count(grp_start, iop_chan, len); 581 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 582 iop_desc_set_memcpy_src_addr(grp_start, dma_src); 583 sw_desc->unmap_src_cnt = 1; 584 sw_desc->unmap_len = len; 585 sw_desc->async_tx.flags = flags; 586 } 587 spin_unlock_bh(&iop_chan->lock); 588 589 return sw_desc ? &sw_desc->async_tx : NULL; 590} 591 592static struct dma_async_tx_descriptor * 593iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, 594 int value, size_t len, unsigned long flags) 595{ 596 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 597 struct iop_adma_desc_slot *sw_desc, *grp_start; 598 int slot_cnt, slots_per_op; 599 600 if (unlikely(!len)) 601 return NULL; 602 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 603 604 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 605 __func__, len); 606 607 spin_lock_bh(&iop_chan->lock); 608 slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); 609 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 610 if (sw_desc) { 611 grp_start = sw_desc->group_head; 612 iop_desc_init_memset(grp_start, flags); 613 iop_desc_set_byte_count(grp_start, iop_chan, len); 614 iop_desc_set_block_fill_val(grp_start, value); 615 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 616 sw_desc->unmap_src_cnt = 1; 617 sw_desc->unmap_len = len; 618 sw_desc->async_tx.flags = flags; 619 } 620 spin_unlock_bh(&iop_chan->lock); 621 622 return sw_desc ? &sw_desc->async_tx : NULL; 623} 624 625static struct dma_async_tx_descriptor * 626iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, 627 dma_addr_t *dma_src, unsigned int src_cnt, size_t len, 628 unsigned long flags) 629{ 630 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 631 struct iop_adma_desc_slot *sw_desc, *grp_start; 632 int slot_cnt, slots_per_op; 633 634 if (unlikely(!len)) 635 return NULL; 636 BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); 637 638 dev_dbg(iop_chan->device->common.dev, 639 "%s src_cnt: %d len: %u flags: %lx\n", 640 __func__, src_cnt, len, flags); 641 642 spin_lock_bh(&iop_chan->lock); 643 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); 644 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 645 if (sw_desc) { 646 grp_start = sw_desc->group_head; 647 iop_desc_init_xor(grp_start, src_cnt, flags); 648 iop_desc_set_byte_count(grp_start, iop_chan, len); 649 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 650 sw_desc->unmap_src_cnt = src_cnt; 651 sw_desc->unmap_len = len; 652 sw_desc->async_tx.flags = flags; 653 while (src_cnt--) 654 iop_desc_set_xor_src_addr(grp_start, src_cnt, 655 dma_src[src_cnt]); 656 } 657 spin_unlock_bh(&iop_chan->lock); 658 659 return sw_desc ? &sw_desc->async_tx : NULL; 660} 661 662static struct dma_async_tx_descriptor * 663iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, 664 unsigned int src_cnt, size_t len, u32 *result, 665 unsigned long flags) 666{ 667 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 668 struct iop_adma_desc_slot *sw_desc, *grp_start; 669 int slot_cnt, slots_per_op; 670 671 if (unlikely(!len)) 672 return NULL; 673 674 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", 675 __func__, src_cnt, len); 676 677 spin_lock_bh(&iop_chan->lock); 678 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); 679 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 680 if (sw_desc) { 681 grp_start = sw_desc->group_head; 682 iop_desc_init_zero_sum(grp_start, src_cnt, flags); 683 iop_desc_set_zero_sum_byte_count(grp_start, len); 684 grp_start->xor_check_result = result; 685 pr_debug("\t%s: grp_start->xor_check_result: %p\n", 686 __func__, grp_start->xor_check_result); 687 sw_desc->unmap_src_cnt = src_cnt; 688 sw_desc->unmap_len = len; 689 sw_desc->async_tx.flags = flags; 690 while (src_cnt--) 691 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, 692 dma_src[src_cnt]); 693 } 694 spin_unlock_bh(&iop_chan->lock); 695 696 return sw_desc ? &sw_desc->async_tx : NULL; 697} 698 699static void iop_adma_free_chan_resources(struct dma_chan *chan) 700{ 701 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 702 struct iop_adma_desc_slot *iter, *_iter; 703 int in_use_descs = 0; 704 705 iop_adma_slot_cleanup(iop_chan); 706 707 spin_lock_bh(&iop_chan->lock); 708 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, 709 chain_node) { 710 in_use_descs++; 711 list_del(&iter->chain_node); 712 } 713 list_for_each_entry_safe_reverse( 714 iter, _iter, &iop_chan->all_slots, slot_node) { 715 list_del(&iter->slot_node); 716 kfree(iter); 717 iop_chan->slots_allocated--; 718 } 719 iop_chan->last_used = NULL; 720 721 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", 722 __func__, iop_chan->slots_allocated); 723 spin_unlock_bh(&iop_chan->lock); 724 725 /* one is ok since we left it on there on purpose */ 726 if (in_use_descs > 1) 727 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n", 728 in_use_descs - 1); 729} 730 731/** 732 * iop_adma_is_complete - poll the status of an ADMA transaction 733 * @chan: ADMA channel handle 734 * @cookie: ADMA transaction identifier 735 */ 736static enum dma_status iop_adma_is_complete(struct dma_chan *chan, 737 dma_cookie_t cookie, 738 dma_cookie_t *done, 739 dma_cookie_t *used) 740{ 741 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 742 dma_cookie_t last_used; 743 dma_cookie_t last_complete; 744 enum dma_status ret; 745 746 last_used = chan->cookie; 747 last_complete = iop_chan->completed_cookie; 748 749 if (done) 750 *done = last_complete; 751 if (used) 752 *used = last_used; 753 754 ret = dma_async_is_complete(cookie, last_complete, last_used); 755 if (ret == DMA_SUCCESS) 756 return ret; 757 758 iop_adma_slot_cleanup(iop_chan); 759 760 last_used = chan->cookie; 761 last_complete = iop_chan->completed_cookie; 762 763 if (done) 764 *done = last_complete; 765 if (used) 766 *used = last_used; 767 768 return dma_async_is_complete(cookie, last_complete, last_used); 769} 770 771static irqreturn_t iop_adma_eot_handler(int irq, void *data) 772{ 773 struct iop_adma_chan *chan = data; 774 775 dev_dbg(chan->device->common.dev, "%s\n", __func__); 776 777 tasklet_schedule(&chan->irq_tasklet); 778 779 iop_adma_device_clear_eot_status(chan); 780 781 return IRQ_HANDLED; 782} 783 784static irqreturn_t iop_adma_eoc_handler(int irq, void *data) 785{ 786 struct iop_adma_chan *chan = data; 787 788 dev_dbg(chan->device->common.dev, "%s\n", __func__); 789 790 tasklet_schedule(&chan->irq_tasklet); 791 792 iop_adma_device_clear_eoc_status(chan); 793 794 return IRQ_HANDLED; 795} 796 797static irqreturn_t iop_adma_err_handler(int irq, void *data) 798{ 799 struct iop_adma_chan *chan = data; 800 unsigned long status = iop_chan_get_status(chan); 801 802 dev_printk(KERN_ERR, chan->device->common.dev, 803 "error ( %s%s%s%s%s%s%s)\n", 804 iop_is_err_int_parity(status, chan) ? "int_parity " : "", 805 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", 806 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "", 807 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "", 808 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "", 809 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "", 810 iop_is_err_split_tx(status, chan) ? "split_tx " : ""); 811 812 iop_adma_device_clear_err_status(chan); 813 814 BUG(); 815 816 return IRQ_HANDLED; 817} 818 819static void iop_adma_issue_pending(struct dma_chan *chan) 820{ 821 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 822 823 if (iop_chan->pending) { 824 iop_chan->pending = 0; 825 iop_chan_append(iop_chan); 826 } 827} 828 829/* 830 * Perform a transaction to verify the HW works. 831 */ 832#define IOP_ADMA_TEST_SIZE 2000 833 834static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) 835{ 836 int i; 837 void *src, *dest; 838 dma_addr_t src_dma, dest_dma; 839 struct dma_chan *dma_chan; 840 dma_cookie_t cookie; 841 struct dma_async_tx_descriptor *tx; 842 int err = 0; 843 struct iop_adma_chan *iop_chan; 844 845 dev_dbg(device->common.dev, "%s\n", __func__); 846 847 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 848 if (!src) 849 return -ENOMEM; 850 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 851 if (!dest) { 852 kfree(src); 853 return -ENOMEM; 854 } 855 856 /* Fill in src buffer */ 857 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) 858 ((u8 *) src)[i] = (u8)i; 859 860 /* Start copy, using first DMA channel */ 861 dma_chan = container_of(device->common.channels.next, 862 struct dma_chan, 863 device_node); 864 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 865 err = -ENODEV; 866 goto out; 867 } 868 869 dest_dma = dma_map_single(dma_chan->device->dev, dest, 870 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 871 src_dma = dma_map_single(dma_chan->device->dev, src, 872 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); 873 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 874 IOP_ADMA_TEST_SIZE, 875 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 876 877 cookie = iop_adma_tx_submit(tx); 878 iop_adma_issue_pending(dma_chan); 879 msleep(1); 880 881 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 882 DMA_SUCCESS) { 883 dev_printk(KERN_ERR, dma_chan->device->dev, 884 "Self-test copy timed out, disabling\n"); 885 err = -ENODEV; 886 goto free_resources; 887 } 888 889 iop_chan = to_iop_adma_chan(dma_chan); 890 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, 891 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 892 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { 893 dev_printk(KERN_ERR, dma_chan->device->dev, 894 "Self-test copy failed compare, disabling\n"); 895 err = -ENODEV; 896 goto free_resources; 897 } 898 899free_resources: 900 iop_adma_free_chan_resources(dma_chan); 901out: 902 kfree(src); 903 kfree(dest); 904 return err; 905} 906 907#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ 908static int __devinit 909iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) 910{ 911 int i, src_idx; 912 struct page *dest; 913 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; 914 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 915 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 916 dma_addr_t dma_addr, dest_dma; 917 struct dma_async_tx_descriptor *tx; 918 struct dma_chan *dma_chan; 919 dma_cookie_t cookie; 920 u8 cmp_byte = 0; 921 u32 cmp_word; 922 u32 zero_sum_result; 923 int err = 0; 924 struct iop_adma_chan *iop_chan; 925 926 dev_dbg(device->common.dev, "%s\n", __func__); 927 928 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 929 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 930 if (!xor_srcs[src_idx]) { 931 while (src_idx--) 932 __free_page(xor_srcs[src_idx]); 933 return -ENOMEM; 934 } 935 } 936 937 dest = alloc_page(GFP_KERNEL); 938 if (!dest) { 939 while (src_idx--) 940 __free_page(xor_srcs[src_idx]); 941 return -ENOMEM; 942 } 943 944 /* Fill in src buffers */ 945 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 946 u8 *ptr = page_address(xor_srcs[src_idx]); 947 for (i = 0; i < PAGE_SIZE; i++) 948 ptr[i] = (1 << src_idx); 949 } 950 951 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) 952 cmp_byte ^= (u8) (1 << src_idx); 953 954 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 955 (cmp_byte << 8) | cmp_byte; 956 957 memset(page_address(dest), 0, PAGE_SIZE); 958 959 dma_chan = container_of(device->common.channels.next, 960 struct dma_chan, 961 device_node); 962 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 963 err = -ENODEV; 964 goto out; 965 } 966 967 /* test xor */ 968 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, 969 PAGE_SIZE, DMA_FROM_DEVICE); 970 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) 971 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 972 0, PAGE_SIZE, DMA_TO_DEVICE); 973 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 974 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 975 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 976 977 cookie = iop_adma_tx_submit(tx); 978 iop_adma_issue_pending(dma_chan); 979 msleep(8); 980 981 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 982 DMA_SUCCESS) { 983 dev_printk(KERN_ERR, dma_chan->device->dev, 984 "Self-test xor timed out, disabling\n"); 985 err = -ENODEV; 986 goto free_resources; 987 } 988 989 iop_chan = to_iop_adma_chan(dma_chan); 990 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, 991 PAGE_SIZE, DMA_FROM_DEVICE); 992 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 993 u32 *ptr = page_address(dest); 994 if (ptr[i] != cmp_word) { 995 dev_printk(KERN_ERR, dma_chan->device->dev, 996 "Self-test xor failed compare, disabling\n"); 997 err = -ENODEV; 998 goto free_resources; 999 } 1000 } 1001 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma, 1002 PAGE_SIZE, DMA_TO_DEVICE); 1003 1004 /* skip zero sum if the capability is not present */ 1005 if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask)) 1006 goto free_resources; 1007 1008 /* zero sum the sources with the destintation page */ 1009 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) 1010 zero_sum_srcs[i] = xor_srcs[i]; 1011 zero_sum_srcs[i] = dest; 1012 1013 zero_sum_result = 1; 1014 1015 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) 1016 dma_srcs[i] = dma_map_page(dma_chan->device->dev, 1017 zero_sum_srcs[i], 0, PAGE_SIZE, 1018 DMA_TO_DEVICE); 1019 tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, 1020 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, 1021 &zero_sum_result, 1022 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1023 1024 cookie = iop_adma_tx_submit(tx); 1025 iop_adma_issue_pending(dma_chan); 1026 msleep(8); 1027 1028 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1029 dev_printk(KERN_ERR, dma_chan->device->dev, 1030 "Self-test zero sum timed out, disabling\n"); 1031 err = -ENODEV; 1032 goto free_resources; 1033 } 1034 1035 if (zero_sum_result != 0) { 1036 dev_printk(KERN_ERR, dma_chan->device->dev, 1037 "Self-test zero sum failed compare, disabling\n"); 1038 err = -ENODEV; 1039 goto free_resources; 1040 } 1041 1042 /* test memset */ 1043 dma_addr = dma_map_page(dma_chan->device->dev, dest, 0, 1044 PAGE_SIZE, DMA_FROM_DEVICE); 1045 tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1046 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1047 1048 cookie = iop_adma_tx_submit(tx); 1049 iop_adma_issue_pending(dma_chan); 1050 msleep(8); 1051 1052 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1053 dev_printk(KERN_ERR, dma_chan->device->dev, 1054 "Self-test memset timed out, disabling\n"); 1055 err = -ENODEV; 1056 goto free_resources; 1057 } 1058 1059 for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { 1060 u32 *ptr = page_address(dest); 1061 if (ptr[i]) { 1062 dev_printk(KERN_ERR, dma_chan->device->dev, 1063 "Self-test memset failed compare, disabling\n"); 1064 err = -ENODEV; 1065 goto free_resources; 1066 } 1067 } 1068 1069 /* test for non-zero parity sum */ 1070 zero_sum_result = 0; 1071 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) 1072 dma_srcs[i] = dma_map_page(dma_chan->device->dev, 1073 zero_sum_srcs[i], 0, PAGE_SIZE, 1074 DMA_TO_DEVICE); 1075 tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, 1076 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, 1077 &zero_sum_result, 1078 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1079 1080 cookie = iop_adma_tx_submit(tx); 1081 iop_adma_issue_pending(dma_chan); 1082 msleep(8); 1083 1084 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1085 dev_printk(KERN_ERR, dma_chan->device->dev, 1086 "Self-test non-zero sum timed out, disabling\n"); 1087 err = -ENODEV; 1088 goto free_resources; 1089 } 1090 1091 if (zero_sum_result != 1) { 1092 dev_printk(KERN_ERR, dma_chan->device->dev, 1093 "Self-test non-zero sum failed compare, disabling\n"); 1094 err = -ENODEV; 1095 goto free_resources; 1096 } 1097 1098free_resources: 1099 iop_adma_free_chan_resources(dma_chan); 1100out: 1101 src_idx = IOP_ADMA_NUM_SRC_TEST; 1102 while (src_idx--) 1103 __free_page(xor_srcs[src_idx]); 1104 __free_page(dest); 1105 return err; 1106} 1107 1108static int __devexit iop_adma_remove(struct platform_device *dev) 1109{ 1110 struct iop_adma_device *device = platform_get_drvdata(dev); 1111 struct dma_chan *chan, *_chan; 1112 struct iop_adma_chan *iop_chan; 1113 struct iop_adma_platform_data *plat_data = dev->dev.platform_data; 1114 1115 dma_async_device_unregister(&device->common); 1116 1117 dma_free_coherent(&dev->dev, plat_data->pool_size, 1118 device->dma_desc_pool_virt, device->dma_desc_pool); 1119 1120 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1121 device_node) { 1122 iop_chan = to_iop_adma_chan(chan); 1123 list_del(&chan->device_node); 1124 kfree(iop_chan); 1125 } 1126 kfree(device); 1127 1128 return 0; 1129} 1130 1131static int __devinit iop_adma_probe(struct platform_device *pdev) 1132{ 1133 struct resource *res; 1134 int ret = 0, i; 1135 struct iop_adma_device *adev; 1136 struct iop_adma_chan *iop_chan; 1137 struct dma_device *dma_dev; 1138 struct iop_adma_platform_data *plat_data = pdev->dev.platform_data; 1139 1140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1141 if (!res) 1142 return -ENODEV; 1143 1144 if (!devm_request_mem_region(&pdev->dev, res->start, 1145 res->end - res->start, pdev->name)) 1146 return -EBUSY; 1147 1148 adev = kzalloc(sizeof(*adev), GFP_KERNEL); 1149 if (!adev) 1150 return -ENOMEM; 1151 dma_dev = &adev->common; 1152 1153 /* allocate coherent memory for hardware descriptors 1154 * note: writecombine gives slightly better performance, but 1155 * requires that we explicitly flush the writes 1156 */ 1157 if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, 1158 plat_data->pool_size, 1159 &adev->dma_desc_pool, 1160 GFP_KERNEL)) == NULL) { 1161 ret = -ENOMEM; 1162 goto err_free_adev; 1163 } 1164 1165 dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", 1166 __func__, adev->dma_desc_pool_virt, 1167 (void *) adev->dma_desc_pool); 1168 1169 adev->id = plat_data->hw_id; 1170 1171 /* discover transaction capabilites from the platform data */ 1172 dma_dev->cap_mask = plat_data->cap_mask; 1173 1174 adev->pdev = pdev; 1175 platform_set_drvdata(pdev, adev); 1176 1177 INIT_LIST_HEAD(&dma_dev->channels); 1178 1179 /* set base routines */ 1180 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; 1181 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; 1182 dma_dev->device_is_tx_complete = iop_adma_is_complete; 1183 dma_dev->device_issue_pending = iop_adma_issue_pending; 1184 dma_dev->dev = &pdev->dev; 1185 1186 /* set prep routines based on capability */ 1187 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1188 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; 1189 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) 1190 dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset; 1191 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1192 dma_dev->max_xor = iop_adma_get_max_xor(); 1193 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; 1194 } 1195 if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask)) 1196 dma_dev->device_prep_dma_zero_sum = 1197 iop_adma_prep_dma_zero_sum; 1198 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) 1199 dma_dev->device_prep_dma_interrupt = 1200 iop_adma_prep_dma_interrupt; 1201 1202 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL); 1203 if (!iop_chan) { 1204 ret = -ENOMEM; 1205 goto err_free_dma; 1206 } 1207 iop_chan->device = adev; 1208 1209 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, 1210 res->end - res->start); 1211 if (!iop_chan->mmr_base) { 1212 ret = -ENOMEM; 1213 goto err_free_iop_chan; 1214 } 1215 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long) 1216 iop_chan); 1217 1218 /* clear errors before enabling interrupts */ 1219 iop_adma_device_clear_err_status(iop_chan); 1220 1221 for (i = 0; i < 3; i++) { 1222 irq_handler_t handler[] = { iop_adma_eot_handler, 1223 iop_adma_eoc_handler, 1224 iop_adma_err_handler }; 1225 int irq = platform_get_irq(pdev, i); 1226 if (irq < 0) { 1227 ret = -ENXIO; 1228 goto err_free_iop_chan; 1229 } else { 1230 ret = devm_request_irq(&pdev->dev, irq, 1231 handler[i], 0, pdev->name, iop_chan); 1232 if (ret) 1233 goto err_free_iop_chan; 1234 } 1235 } 1236 1237 spin_lock_init(&iop_chan->lock); 1238 INIT_LIST_HEAD(&iop_chan->chain); 1239 INIT_LIST_HEAD(&iop_chan->all_slots); 1240 iop_chan->common.device = dma_dev; 1241 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1242 1243 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1244 ret = iop_adma_memcpy_self_test(adev); 1245 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1246 if (ret) 1247 goto err_free_iop_chan; 1248 } 1249 1250 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || 1251 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { 1252 ret = iop_adma_xor_zero_sum_self_test(adev); 1253 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1254 if (ret) 1255 goto err_free_iop_chan; 1256 } 1257 1258 dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " 1259 "( %s%s%s%s%s%s%s%s%s%s)\n", 1260 dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", 1261 dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", 1262 dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "", 1263 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1264 dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", 1265 dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "", 1266 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", 1267 dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", 1268 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1269 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1270 1271 dma_async_device_register(dma_dev); 1272 goto out; 1273 1274 err_free_iop_chan: 1275 kfree(iop_chan); 1276 err_free_dma: 1277 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, 1278 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1279 err_free_adev: 1280 kfree(adev); 1281 out: 1282 return ret; 1283} 1284 1285static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) 1286{ 1287 struct iop_adma_desc_slot *sw_desc, *grp_start; 1288 dma_cookie_t cookie; 1289 int slot_cnt, slots_per_op; 1290 1291 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1292 1293 spin_lock_bh(&iop_chan->lock); 1294 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); 1295 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1296 if (sw_desc) { 1297 grp_start = sw_desc->group_head; 1298 1299 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1300 async_tx_ack(&sw_desc->async_tx); 1301 iop_desc_init_memcpy(grp_start, 0); 1302 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1303 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1304 iop_desc_set_memcpy_src_addr(grp_start, 0); 1305 1306 cookie = iop_chan->common.cookie; 1307 cookie++; 1308 if (cookie <= 1) 1309 cookie = 2; 1310 1311 /* initialize the completed cookie to be less than 1312 * the most recently used cookie 1313 */ 1314 iop_chan->completed_cookie = cookie - 1; 1315 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1316 1317 /* channel should not be busy */ 1318 BUG_ON(iop_chan_is_busy(iop_chan)); 1319 1320 /* clear any prior error-status bits */ 1321 iop_adma_device_clear_err_status(iop_chan); 1322 1323 /* disable operation */ 1324 iop_chan_disable(iop_chan); 1325 1326 /* set the descriptor address */ 1327 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); 1328 1329 /* 1/ don't add pre-chained descriptors 1330 * 2/ dummy read to flush next_desc write 1331 */ 1332 BUG_ON(iop_desc_get_next_desc(sw_desc)); 1333 1334 /* run the descriptor */ 1335 iop_chan_enable(iop_chan); 1336 } else 1337 dev_printk(KERN_ERR, iop_chan->device->common.dev, 1338 "failed to allocate null descriptor\n"); 1339 spin_unlock_bh(&iop_chan->lock); 1340} 1341 1342static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) 1343{ 1344 struct iop_adma_desc_slot *sw_desc, *grp_start; 1345 dma_cookie_t cookie; 1346 int slot_cnt, slots_per_op; 1347 1348 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1349 1350 spin_lock_bh(&iop_chan->lock); 1351 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); 1352 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1353 if (sw_desc) { 1354 grp_start = sw_desc->group_head; 1355 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1356 async_tx_ack(&sw_desc->async_tx); 1357 iop_desc_init_null_xor(grp_start, 2, 0); 1358 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1359 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1360 iop_desc_set_xor_src_addr(grp_start, 0, 0); 1361 iop_desc_set_xor_src_addr(grp_start, 1, 0); 1362 1363 cookie = iop_chan->common.cookie; 1364 cookie++; 1365 if (cookie <= 1) 1366 cookie = 2; 1367 1368 /* initialize the completed cookie to be less than 1369 * the most recently used cookie 1370 */ 1371 iop_chan->completed_cookie = cookie - 1; 1372 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1373 1374 /* channel should not be busy */ 1375 BUG_ON(iop_chan_is_busy(iop_chan)); 1376 1377 /* clear any prior error-status bits */ 1378 iop_adma_device_clear_err_status(iop_chan); 1379 1380 /* disable operation */ 1381 iop_chan_disable(iop_chan); 1382 1383 /* set the descriptor address */ 1384 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); 1385 1386 /* 1/ don't add pre-chained descriptors 1387 * 2/ dummy read to flush next_desc write 1388 */ 1389 BUG_ON(iop_desc_get_next_desc(sw_desc)); 1390 1391 /* run the descriptor */ 1392 iop_chan_enable(iop_chan); 1393 } else 1394 dev_printk(KERN_ERR, iop_chan->device->common.dev, 1395 "failed to allocate null descriptor\n"); 1396 spin_unlock_bh(&iop_chan->lock); 1397} 1398 1399MODULE_ALIAS("platform:iop-adma"); 1400 1401static struct platform_driver iop_adma_driver = { 1402 .probe = iop_adma_probe, 1403 .remove = __devexit_p(iop_adma_remove), 1404 .driver = { 1405 .owner = THIS_MODULE, 1406 .name = "iop-adma", 1407 }, 1408}; 1409 1410static int __init iop_adma_init (void) 1411{ 1412 return platform_driver_register(&iop_adma_driver); 1413} 1414 1415static void __exit iop_adma_exit (void) 1416{ 1417 platform_driver_unregister(&iop_adma_driver); 1418 return; 1419} 1420module_exit(iop_adma_exit); 1421module_init(iop_adma_init); 1422 1423MODULE_AUTHOR("Intel Corporation"); 1424MODULE_DESCRIPTION("IOP ADMA Engine Driver"); 1425MODULE_LICENSE("GPL");