Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/iop-adma.c at v2.6.28-rc6
/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <mach/adma.h>

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}

static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
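		 * (note: the cleanup below always unmaps with
		 * dma_unmap_page, for the destination as well as for
		 * each source)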
		 */
		if (desc->group_head && desc->unmap_len) {
			struct iop_adma_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&iop_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;

			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				addr = iop_desc_get_dest_addr(unmap, iop_chan);
				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				src_cnt = unmap->unmap_src_cnt;
				while (src_cnt--) {
					addr = iop_desc_get_src_addr(unmap,
								iop_chan,
								src_cnt);
					dma_unmap_page(dev, addr, len,
							DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}

static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e.
		 * has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
					    grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
			iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	BUG_ON(!seen_current);

	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	spin_lock(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}

static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
				  struct iop_adma_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
	struct iop_adma_desc_slot *desc)
{
	dma_cookie_t cookie = iop_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->async_tx.tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 * @client: current client requesting the channel be ready for requests
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		iop_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
						 struct iop_adma_desc_slot,
						 slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		grp_start->unmap_len = 0;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
			 int value, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memset(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_block_fill_val(grp_start, value);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
			   unsigned int src_cnt, size_t len, u32 *result,
			   unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_printk(KERN_ERR, chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");
"split_tx " : ""); 806 807 iop_adma_device_clear_err_status(chan); 808 809 BUG(); 810 811 return IRQ_HANDLED; 812} 813 814static void iop_adma_issue_pending(struct dma_chan *chan) 815{ 816 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 817 818 if (iop_chan->pending) { 819 iop_chan->pending = 0; 820 iop_chan_append(iop_chan); 821 } 822} 823 824/* 825 * Perform a transaction to verify the HW works. 826 */ 827#define IOP_ADMA_TEST_SIZE 2000 828 829static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) 830{ 831 int i; 832 void *src, *dest; 833 dma_addr_t src_dma, dest_dma; 834 struct dma_chan *dma_chan; 835 dma_cookie_t cookie; 836 struct dma_async_tx_descriptor *tx; 837 int err = 0; 838 struct iop_adma_chan *iop_chan; 839 840 dev_dbg(device->common.dev, "%s\n", __func__); 841 842 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 843 if (!src) 844 return -ENOMEM; 845 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 846 if (!dest) { 847 kfree(src); 848 return -ENOMEM; 849 } 850 851 /* Fill in src buffer */ 852 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) 853 ((u8 *) src)[i] = (u8)i; 854 855 /* Start copy, using first DMA channel */ 856 dma_chan = container_of(device->common.channels.next, 857 struct dma_chan, 858 device_node); 859 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { 860 err = -ENODEV; 861 goto out; 862 } 863 864 dest_dma = dma_map_single(dma_chan->device->dev, dest, 865 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 866 src_dma = dma_map_single(dma_chan->device->dev, src, 867 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); 868 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 869 IOP_ADMA_TEST_SIZE, 870 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 871 872 cookie = iop_adma_tx_submit(tx); 873 iop_adma_issue_pending(dma_chan); 874 msleep(1); 875 876 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 877 DMA_SUCCESS) { 878 dev_printk(KERN_ERR, dma_chan->device->dev, 879 "Self-test copy timed out, disabling\n"); 880 err = -ENODEV; 881 goto free_resources; 882 } 883 884 iop_chan = to_iop_adma_chan(dma_chan); 885 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, 886 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 887 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { 888 dev_printk(KERN_ERR, dma_chan->device->dev, 889 "Self-test copy failed compare, disabling\n"); 890 err = -ENODEV; 891 goto free_resources; 892 } 893 894free_resources: 895 iop_adma_free_chan_resources(dma_chan); 896out: 897 kfree(src); 898 kfree(dest); 899 return err; 900} 901 902#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ 903static int __devinit 904iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) 905{ 906 int i, src_idx; 907 struct page *dest; 908 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; 909 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 910 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 911 dma_addr_t dma_addr, dest_dma; 912 struct dma_async_tx_descriptor *tx; 913 struct dma_chan *dma_chan; 914 dma_cookie_t cookie; 915 u8 cmp_byte = 0; 916 u32 cmp_word; 917 u32 zero_sum_result; 918 int err = 0; 919 struct iop_adma_chan *iop_chan; 920 921 dev_dbg(device->common.dev, "%s\n", __func__); 922 923 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 924 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 925 if (!xor_srcs[src_idx]) 926 while (src_idx--) { 927 __free_page(xor_srcs[src_idx]); 928 return -ENOMEM; 929 } 930 } 931 932 dest = alloc_page(GFP_KERNEL); 933 if (!dest) 934 while (src_idx--) { 935 __free_page(xor_srcs[src_idx]); 936 return -ENOMEM; 937 } 938 939 
	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	int i;
	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	for (i = 0; i < 3; i++) {
		unsigned int irq;
		irq = platform_get_irq(dev, i);
		free_irq(irq, device);
	}

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	do {
		struct resource *res;
		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
		release_mem_region(res->start, res->end - res->start);
	} while (0);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}

static int __devinit iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				res->end - res->start, pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
					plat_data->pool_size,
					&adev->dma_desc_pool,
					GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__func__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_is_tx_complete = iop_adma_is_complete;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
		dma_dev->device_prep_dma_zero_sum =
			iop_adma_prep_dma_zero_sum;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					res->end - res->start);
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	INIT_RCU_HEAD(&iop_chan->common.rcu);
	iop_chan->common.device = dma_dev;
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		ret = iop_adma_xor_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
	  "( %s%s%s%s%s%s%s%s%s%s)\n",
	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
"pq_update " : "", 1271 dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "", 1272 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1273 dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", 1274 dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "", 1275 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", 1276 dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", 1277 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1278 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1279 1280 dma_async_device_register(dma_dev); 1281 goto out; 1282 1283 err_free_iop_chan: 1284 kfree(iop_chan); 1285 err_free_dma: 1286 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, 1287 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1288 err_free_adev: 1289 kfree(adev); 1290 out: 1291 return ret; 1292} 1293 1294static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) 1295{ 1296 struct iop_adma_desc_slot *sw_desc, *grp_start; 1297 dma_cookie_t cookie; 1298 int slot_cnt, slots_per_op; 1299 1300 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1301 1302 spin_lock_bh(&iop_chan->lock); 1303 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); 1304 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1305 if (sw_desc) { 1306 grp_start = sw_desc->group_head; 1307 1308 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1309 async_tx_ack(&sw_desc->async_tx); 1310 iop_desc_init_memcpy(grp_start, 0); 1311 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1312 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1313 iop_desc_set_memcpy_src_addr(grp_start, 0); 1314 1315 cookie = iop_chan->common.cookie; 1316 cookie++; 1317 if (cookie <= 1) 1318 cookie = 2; 1319 1320 /* initialize the completed cookie to be less than 1321 * the most recently used cookie 1322 */ 1323 iop_chan->completed_cookie = cookie - 1; 1324 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1325 1326 /* channel should not be busy */ 1327 BUG_ON(iop_chan_is_busy(iop_chan)); 1328 1329 /* clear any prior error-status bits */ 1330 iop_adma_device_clear_err_status(iop_chan); 1331 1332 /* disable operation */ 1333 iop_chan_disable(iop_chan); 1334 1335 /* set the descriptor address */ 1336 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); 1337 1338 /* 1/ don't add pre-chained descriptors 1339 * 2/ dummy read to flush next_desc write 1340 */ 1341 BUG_ON(iop_desc_get_next_desc(sw_desc)); 1342 1343 /* run the descriptor */ 1344 iop_chan_enable(iop_chan); 1345 } else 1346 dev_printk(KERN_ERR, iop_chan->device->common.dev, 1347 "failed to allocate null descriptor\n"); 1348 spin_unlock_bh(&iop_chan->lock); 1349} 1350 1351static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) 1352{ 1353 struct iop_adma_desc_slot *sw_desc, *grp_start; 1354 dma_cookie_t cookie; 1355 int slot_cnt, slots_per_op; 1356 1357 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1358 1359 spin_lock_bh(&iop_chan->lock); 1360 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); 1361 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1362 if (sw_desc) { 1363 grp_start = sw_desc->group_head; 1364 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1365 async_tx_ack(&sw_desc->async_tx); 1366 iop_desc_init_null_xor(grp_start, 2, 0); 1367 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1368 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1369 iop_desc_set_xor_src_addr(grp_start, 0, 0); 1370 
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

MODULE_ALIAS("platform:iop-adma");

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init(void)
{
	return platform_driver_register(&iop_adma_driver);
}

/* it's currently unsafe to unload this module */
#if 0
static void __exit iop_adma_exit(void)
{
	platform_driver_unregister(&iop_adma_driver);
	return;
}
module_exit(iop_adma_exit);
#endif

module_init(iop_adma_init);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
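
/*
 * Illustrative client-side sketch (editor's note, not part of the driver):
 * a dmaengine consumer of this kernel generation drives a channel much like
 * the self-tests above do. The names dev, chan, src_buf, dest_buf, and len
 * are placeholders.
 *
 *	dma_addr_t dst = dma_map_single(dev, dest_buf, len, DMA_FROM_DEVICE);
 *	dma_addr_t src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
 *	struct dma_async_tx_descriptor *tx =
 *		chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						     DMA_CTRL_ACK);
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	chan->device->device_issue_pending(chan);
 *	while (chan->device->device_is_tx_complete(chan, cookie,
 *						   NULL, NULL) != DMA_SUCCESS)
 *		cpu_relax();
 */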