Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ioat: switch watchdog and reset handler from workqueue to timer

In order to support dynamic resizing of the descriptor ring, or polling
for a descriptor in the presence of a hung channel, the reset handler
needs to make progress while in a non-preemptible context. The current
workqueue implementation precludes polling channel reset completion
under spin_lock().

This conversion also allows us to return to opportunistic cleanup in the
ioat2 case, as the timer implementation guarantees at least one cleanup
after every descriptor is submitted. This means the worst-case
completion latency becomes the timer period (for exceptional
circumstances), but with the benefit of avoiding busy waiting when the
lock is contended.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+397 -437
+137 -216
drivers/dma/ioat/dma.c
··· 99 99 /* common channel initialization */ 100 100 void ioat_init_channel(struct ioatdma_device *device, 101 101 struct ioat_chan_common *chan, int idx, 102 - work_func_t work_fn, void (*tasklet)(unsigned long), 103 - unsigned long tasklet_data) 102 + void (*timer_fn)(unsigned long), 103 + void (*tasklet)(unsigned long), 104 + unsigned long ioat) 104 105 { 105 106 struct dma_device *dma = &device->common; 106 107 107 108 chan->device = device; 108 109 chan->reg_base = device->reg_base + (0x80 * (idx + 1)); 109 - INIT_DELAYED_WORK(&chan->work, work_fn); 110 110 spin_lock_init(&chan->cleanup_lock); 111 111 chan->common.device = dma; 112 112 list_add_tail(&chan->common.device_node, &dma->channels); 113 113 device->idx[idx] = chan; 114 - tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); 114 + init_timer(&chan->timer); 115 + chan->timer.function = timer_fn; 116 + chan->timer.data = ioat; 117 + tasklet_init(&chan->cleanup_task, tasklet, ioat); 115 118 tasklet_disable(&chan->cleanup_task); 116 119 } 117 120 118 - static void ioat1_reset_part2(struct work_struct *work); 121 + static void ioat1_timer_event(unsigned long data); 119 122 120 123 /** 121 124 * ioat1_dma_enumerate_channels - find and initialize the device's channels ··· 156 153 break; 157 154 158 155 ioat_init_channel(device, &ioat->base, i, 159 - ioat1_reset_part2, 156 + ioat1_timer_event, 160 157 ioat1_cleanup_tasklet, 161 158 (unsigned long) ioat); 162 159 ioat->xfercap = xfercap; ··· 196 193 } 197 194 198 195 /** 199 - * ioat1_reset_part2 - reinit the channel after a reset 200 - */ 201 - static void ioat1_reset_part2(struct work_struct *work) 202 - { 203 - struct ioat_chan_common *chan; 204 - struct ioat_dma_chan *ioat; 205 - struct ioat_desc_sw *desc; 206 - int dmacount; 207 - bool start_null = false; 208 - 209 - chan = container_of(work, struct ioat_chan_common, work.work); 210 - ioat = container_of(chan, struct ioat_dma_chan, base); 211 - spin_lock_bh(&chan->cleanup_lock); 212 - 
spin_lock_bh(&ioat->desc_lock); 213 - 214 - *chan->completion = 0; 215 - ioat->pending = 0; 216 - 217 - /* count the descriptors waiting */ 218 - dmacount = 0; 219 - if (ioat->used_desc.prev) { 220 - desc = to_ioat_desc(ioat->used_desc.prev); 221 - do { 222 - dmacount++; 223 - desc = to_ioat_desc(desc->node.next); 224 - } while (&desc->node != ioat->used_desc.next); 225 - } 226 - 227 - if (dmacount) { 228 - /* 229 - * write the new starting descriptor address 230 - * this puts channel engine into ARMED state 231 - */ 232 - desc = to_ioat_desc(ioat->used_desc.prev); 233 - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 234 - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 235 - writel(((u64) desc->txd.phys) >> 32, 236 - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 237 - 238 - writeb(IOAT_CHANCMD_START, chan->reg_base 239 - + IOAT_CHANCMD_OFFSET(chan->device->version)); 240 - } else 241 - start_null = true; 242 - spin_unlock_bh(&ioat->desc_lock); 243 - spin_unlock_bh(&chan->cleanup_lock); 244 - 245 - dev_err(to_dev(chan), 246 - "chan%d reset - %d descs waiting, %d total desc\n", 247 - chan_num(chan), dmacount, ioat->desccount); 248 - 249 - if (start_null) 250 - ioat1_dma_start_null_desc(ioat); 251 - } 252 - 253 - /** 254 196 * ioat1_reset_channel - restart a channel 255 197 * @ioat: IOAT DMA channel handle 256 198 */ ··· 205 257 void __iomem *reg_base = chan->reg_base; 206 258 u32 chansts, chanerr; 207 259 208 - if (!ioat->used_desc.prev) 209 - return; 210 - 211 - dev_dbg(to_dev(chan), "%s\n", __func__); 260 + dev_warn(to_dev(chan), "reset\n"); 212 261 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); 213 - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; 262 + chansts = *chan->completion & IOAT_CHANSTS_STATUS; 214 263 if (chanerr) { 215 264 dev_err(to_dev(chan), 216 265 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", ··· 223 278 * while we're waiting. 
224 279 */ 225 280 226 - spin_lock_bh(&ioat->desc_lock); 227 281 ioat->pending = INT_MIN; 228 282 writeb(IOAT_CHANCMD_RESET, 229 283 reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); 230 - spin_unlock_bh(&ioat->desc_lock); 231 - 232 - /* schedule the 2nd half instead of sleeping a long time */ 233 - schedule_delayed_work(&chan->work, RESET_DELAY); 234 - } 235 - 236 - /** 237 - * ioat1_chan_watchdog - watch for stuck channels 238 - */ 239 - static void ioat1_chan_watchdog(struct work_struct *work) 240 - { 241 - struct ioatdma_device *device = 242 - container_of(work, struct ioatdma_device, work.work); 243 - struct ioat_dma_chan *ioat; 244 - struct ioat_chan_common *chan; 245 - int i; 246 - u64 completion; 247 - u32 completion_low; 248 - unsigned long compl_desc_addr_hw; 249 - 250 - for (i = 0; i < device->common.chancnt; i++) { 251 - chan = ioat_chan_by_index(device, i); 252 - ioat = container_of(chan, struct ioat_dma_chan, base); 253 - 254 - if (/* have we started processing anything yet */ 255 - chan->last_completion 256 - /* have we completed any since last watchdog cycle? */ 257 - && (chan->last_completion == chan->watchdog_completion) 258 - /* has TCP stuck on one cookie since last watchdog? */ 259 - && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) 260 - && (chan->watchdog_tcp_cookie != chan->completed_cookie) 261 - /* is there something in the chain to be processed? */ 262 - /* CB1 chain always has at least the last one processed */ 263 - && (ioat->used_desc.prev != ioat->used_desc.next) 264 - && ioat->pending == 0) { 265 - 266 - /* 267 - * check CHANSTS register for completed 268 - * descriptor address. 269 - * if it is different than completion writeback, 270 - * it is not zero 271 - * and it has changed since the last watchdog 272 - * we can assume that channel 273 - * is still working correctly 274 - * and the problem is in completion writeback. 
275 - * update completion writeback 276 - * with actual CHANSTS value 277 - * else 278 - * try resetting the channel 279 - */ 280 - 281 - /* we need to read the low address first as this 282 - * causes the chipset to latch the upper bits 283 - * for the subsequent read 284 - */ 285 - completion_low = readl(chan->reg_base + 286 - IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); 287 - completion = readl(chan->reg_base + 288 - IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); 289 - completion <<= 32; 290 - completion |= completion_low; 291 - compl_desc_addr_hw = completion & 292 - IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 293 - 294 - if ((compl_desc_addr_hw != 0) 295 - && (compl_desc_addr_hw != chan->watchdog_completion) 296 - && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { 297 - chan->last_compl_desc_addr_hw = compl_desc_addr_hw; 298 - *chan->completion = completion; 299 - } else { 300 - ioat1_reset_channel(ioat); 301 - chan->watchdog_completion = 0; 302 - chan->last_compl_desc_addr_hw = 0; 303 - } 304 - } else { 305 - chan->last_compl_desc_addr_hw = 0; 306 - chan->watchdog_completion = chan->last_completion; 307 - } 308 - 309 - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; 310 - } 311 - 312 - schedule_delayed_work(&device->work, WATCHDOG_DELAY); 284 + set_bit(IOAT_RESET_PENDING, &chan->state); 285 + mod_timer(&chan->timer, jiffies + RESET_DELAY); 313 286 } 314 287 315 288 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) ··· 235 372 struct dma_chan *c = tx->chan; 236 373 struct ioat_dma_chan *ioat = to_ioat_chan(c); 237 374 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); 375 + struct ioat_chan_common *chan = &ioat->base; 238 376 struct ioat_desc_sw *first; 239 377 struct ioat_desc_sw *chain_tail; 240 378 dma_cookie_t cookie; ··· 259 395 list_splice_tail_init(&tx->tx_list, &ioat->used_desc); 260 396 dump_desc_dbg(ioat, chain_tail); 261 397 dump_desc_dbg(ioat, first); 398 + 399 + if 
(!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 400 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 262 401 263 402 ioat->pending += desc->hw->tx_cnt; 264 403 if (ioat->pending >= ioat_pending_level) ··· 387 520 return; 388 521 389 522 tasklet_disable(&chan->cleanup_task); 523 + del_timer_sync(&chan->timer); 390 524 ioat1_cleanup(ioat); 391 525 392 526 /* Delay 100ms after reset to allow internal DMA logic to quiesce ··· 428 560 429 561 chan->last_completion = 0; 430 562 chan->completion_dma = 0; 431 - chan->watchdog_completion = 0; 432 - chan->last_compl_desc_addr_hw = 0; 433 - chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; 434 563 ioat->pending = 0; 435 564 ioat->desccount = 0; 436 565 } ··· 570 705 u64 completion; 571 706 572 707 completion = *chan->completion; 573 - phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 708 + phys_complete = ioat_chansts_to_addr(completion); 574 709 575 710 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, 576 711 (unsigned long long) phys_complete); 577 712 578 - if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == 579 - IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { 713 + if (is_ioat_halted(completion)) { 714 + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 580 715 dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", 581 - readl(chan->reg_base + IOAT_CHANERR_OFFSET)); 716 + chanerr); 582 717 583 718 /* TODO do something to salvage the situation */ 584 719 } ··· 586 721 return phys_complete; 587 722 } 588 723 589 - /** 590 - * ioat1_cleanup - cleanup up finished descriptors 591 - * @chan: ioat channel to be cleaned up 592 - */ 593 - static void ioat1_cleanup(struct ioat_dma_chan *ioat) 724 + bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 725 + unsigned long *phys_complete) 726 + { 727 + *phys_complete = ioat_get_current_completion(chan); 728 + if (*phys_complete == chan->last_completion) 729 + return false; 730 + 
clear_bit(IOAT_COMPLETION_ACK, &chan->state); 731 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 732 + 733 + return true; 734 + } 735 + 736 + static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) 594 737 { 595 738 struct ioat_chan_common *chan = &ioat->base; 596 - unsigned long phys_complete; 597 - struct ioat_desc_sw *desc, *_desc; 598 - dma_cookie_t cookie = 0; 739 + struct list_head *_desc, *n; 599 740 struct dma_async_tx_descriptor *tx; 600 - 601 - prefetch(chan->completion); 602 - 603 - if (!spin_trylock_bh(&chan->cleanup_lock)) 604 - return; 605 - 606 - phys_complete = ioat_get_current_completion(chan); 607 - if (phys_complete == chan->last_completion) { 608 - spin_unlock_bh(&chan->cleanup_lock); 609 - /* 610 - * perhaps we're stuck so hard that the watchdog can't go off? 611 - * try to catch it after 2 seconds 612 - */ 613 - if (time_after(jiffies, 614 - chan->last_completion_time + HZ*WATCHDOG_DELAY)) { 615 - ioat1_chan_watchdog(&(chan->device->work.work)); 616 - chan->last_completion_time = jiffies; 617 - } 618 - return; 619 - } 620 - chan->last_completion_time = jiffies; 621 - 622 - cookie = 0; 623 - if (!spin_trylock_bh(&ioat->desc_lock)) { 624 - spin_unlock_bh(&chan->cleanup_lock); 625 - return; 626 - } 627 741 628 742 dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", 629 743 __func__, phys_complete); 630 - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { 744 + list_for_each_safe(_desc, n, &ioat->used_desc) { 745 + struct ioat_desc_sw *desc; 746 + 747 + prefetch(n); 748 + desc = list_entry(_desc, typeof(*desc), node); 631 749 tx = &desc->txd; 632 750 /* 633 751 * Incoming DMA requests may use multiple descriptors, ··· 619 771 */ 620 772 dump_desc_dbg(ioat, desc); 621 773 if (tx->cookie) { 622 - cookie = tx->cookie; 774 + chan->completed_cookie = tx->cookie; 775 + tx->cookie = 0; 623 776 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 624 777 if (tx->callback) { 625 778 
tx->callback(tx->callback_param); ··· 635 786 */ 636 787 if (async_tx_test_ack(tx)) 637 788 list_move_tail(&desc->node, &ioat->free_desc); 638 - else 639 - tx->cookie = 0; 640 789 } else { 641 790 /* 642 791 * last used desc. Do not remove, so we can 643 - * append from it, but don't look at it next 644 - * time, either 792 + * append from it. 645 793 */ 646 - tx->cookie = 0; 794 + 795 + /* if nothing else is pending, cancel the 796 + * completion timeout 797 + */ 798 + if (n == &ioat->used_desc) { 799 + dev_dbg(to_dev(chan), 800 + "%s cancel completion timeout\n", 801 + __func__); 802 + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 803 + } 647 804 648 805 /* TODO check status bits? */ 649 806 break; 650 807 } 651 808 } 652 809 653 - spin_unlock_bh(&ioat->desc_lock); 654 - 655 810 chan->last_completion = phys_complete; 656 - if (cookie != 0) 657 - chan->completed_cookie = cookie; 811 + } 658 812 813 + /** 814 + * ioat1_cleanup - cleanup up finished descriptors 815 + * @chan: ioat channel to be cleaned up 816 + * 817 + * To prevent lock contention we defer cleanup when the locks are 818 + * contended with a terminal timeout that forces cleanup and catches 819 + * completion notification errors. 
820 + */ 821 + static void ioat1_cleanup(struct ioat_dma_chan *ioat) 822 + { 823 + struct ioat_chan_common *chan = &ioat->base; 824 + unsigned long phys_complete; 825 + 826 + prefetch(chan->completion); 827 + 828 + if (!spin_trylock_bh(&chan->cleanup_lock)) 829 + return; 830 + 831 + if (!ioat_cleanup_preamble(chan, &phys_complete)) { 832 + spin_unlock_bh(&chan->cleanup_lock); 833 + return; 834 + } 835 + 836 + if (!spin_trylock_bh(&ioat->desc_lock)) { 837 + spin_unlock_bh(&chan->cleanup_lock); 838 + return; 839 + } 840 + 841 + __cleanup(ioat, phys_complete); 842 + 843 + spin_unlock_bh(&ioat->desc_lock); 844 + spin_unlock_bh(&chan->cleanup_lock); 845 + } 846 + 847 + static void ioat1_timer_event(unsigned long data) 848 + { 849 + struct ioat_dma_chan *ioat = (void *) data; 850 + struct ioat_chan_common *chan = &ioat->base; 851 + 852 + dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); 853 + 854 + spin_lock_bh(&chan->cleanup_lock); 855 + if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { 856 + struct ioat_desc_sw *desc; 857 + 858 + spin_lock_bh(&ioat->desc_lock); 859 + 860 + /* restart active descriptors */ 861 + desc = to_ioat_desc(ioat->used_desc.prev); 862 + ioat_set_chainaddr(ioat, desc->txd.phys); 863 + ioat_start(chan); 864 + 865 + ioat->pending = 0; 866 + set_bit(IOAT_COMPLETION_PENDING, &chan->state); 867 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 868 + spin_unlock_bh(&ioat->desc_lock); 869 + } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 870 + unsigned long phys_complete; 871 + 872 + spin_lock_bh(&ioat->desc_lock); 873 + /* if we haven't made progress and we have already 874 + * acknowledged a pending completion once, then be more 875 + * forceful with a restart 876 + */ 877 + if (ioat_cleanup_preamble(chan, &phys_complete)) 878 + __cleanup(ioat, phys_complete); 879 + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 880 + ioat1_reset_channel(ioat); 881 + else { 882 + u64 status = ioat_chansts(chan); 883 
+ 884 + /* manually update the last completion address */ 885 + if (ioat_chansts_to_addr(status) != 0) 886 + *chan->completion = status; 887 + 888 + set_bit(IOAT_COMPLETION_ACK, &chan->state); 889 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 890 + } 891 + spin_unlock_bh(&ioat->desc_lock); 892 + } 659 893 spin_unlock_bh(&chan->cleanup_lock); 660 894 } 661 895 ··· 787 855 list_add_tail(&desc->node, &ioat->used_desc); 788 856 dump_desc_dbg(ioat, desc); 789 857 790 - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 791 - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 792 - writel(((u64) desc->txd.phys) >> 32, 793 - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 794 - 795 - writeb(IOAT_CHANCMD_START, chan->reg_base 796 - + IOAT_CHANCMD_OFFSET(chan->device->version)); 858 + ioat_set_chainaddr(ioat, desc->txd.phys); 859 + ioat_start(chan); 797 860 spin_unlock_bh(&ioat->desc_lock); 798 861 } 799 862 ··· 1121 1194 if (dca) 1122 1195 device->dca = ioat_dca_init(pdev, device->reg_base); 1123 1196 1124 - INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); 1125 - schedule_delayed_work(&device->work, WATCHDOG_DELAY); 1126 - 1127 1197 return err; 1128 1198 } 1129 1199 1130 1200 void __devexit ioat_dma_remove(struct ioatdma_device *device) 1131 1201 { 1132 1202 struct dma_device *dma = &device->common; 1133 - 1134 - if (device->version != IOAT_VER_3_0) 1135 - cancel_delayed_work(&device->work); 1136 1203 1137 1204 ioat_disable_interrupts(device); 1138 1205
+93 -19
drivers/dma/ioat/dma.h
··· 23 23 24 24 #include <linux/dmaengine.h> 25 25 #include "hw.h" 26 + #include "registers.h" 26 27 #include <linux/init.h> 27 28 #include <linux/dmapool.h> 28 29 #include <linux/cache.h> ··· 34 33 35 34 #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 36 35 #define IOAT_DMA_DCA_ANY_CPU ~0 37 - #define IOAT_WATCHDOG_PERIOD (2 * HZ) 38 36 39 37 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) 40 38 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) ··· 41 41 #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) 42 42 43 43 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) 44 - 45 - #define RESET_DELAY msecs_to_jiffies(100) 46 - #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) 47 44 48 45 /* 49 46 * workaround for IOAT ver.3.0 null descriptor issue ··· 69 72 struct pci_pool *completion_pool; 70 73 struct dma_device common; 71 74 u8 version; 72 - struct delayed_work work; 73 75 struct msix_entry msix_entries[4]; 74 76 struct ioat_chan_common *idx[4]; 75 77 struct dca_provider *dca; ··· 77 81 }; 78 82 79 83 struct ioat_chan_common { 84 + struct dma_chan common; 80 85 void __iomem *reg_base; 81 - 82 86 unsigned long last_completion; 83 - unsigned long last_completion_time; 84 - 85 87 spinlock_t cleanup_lock; 86 88 dma_cookie_t completed_cookie; 87 - unsigned long watchdog_completion; 88 - int watchdog_tcp_cookie; 89 - u32 watchdog_last_tcp_cookie; 90 - struct delayed_work work; 91 - 89 + unsigned long state; 90 + #define IOAT_COMPLETION_PENDING 0 91 + #define IOAT_COMPLETION_ACK 1 92 + #define IOAT_RESET_PENDING 2 93 + struct timer_list timer; 94 + #define COMPLETION_TIMEOUT msecs_to_jiffies(100) 95 + #define RESET_DELAY msecs_to_jiffies(100) 92 96 struct ioatdma_device *device; 93 - struct dma_chan common; 94 - 95 97 dma_addr_t completion_dma; 96 98 u64 *completion; 97 - unsigned long last_compl_desc_addr_hw; 98 99 struct tasklet_struct cleanup_task; 99 100 }; 100 101 ··· 141 148 
142 149 last_used = c->cookie; 143 150 last_complete = chan->completed_cookie; 144 - chan->watchdog_tcp_cookie = cookie; 145 151 146 152 if (done) 147 153 *done = last_complete; ··· 207 215 return device->idx[index]; 208 216 } 209 217 218 + static inline u64 ioat_chansts(struct ioat_chan_common *chan) 219 + { 220 + u8 ver = chan->device->version; 221 + u64 status; 222 + u32 status_lo; 223 + 224 + /* We need to read the low address first as this causes the 225 + * chipset to latch the upper bits for the subsequent read 226 + */ 227 + status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); 228 + status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); 229 + status <<= 32; 230 + status |= status_lo; 231 + 232 + return status; 233 + } 234 + 235 + static inline void ioat_start(struct ioat_chan_common *chan) 236 + { 237 + u8 ver = chan->device->version; 238 + 239 + writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); 240 + } 241 + 242 + static inline u64 ioat_chansts_to_addr(u64 status) 243 + { 244 + return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 245 + } 246 + 247 + static inline u32 ioat_chanerr(struct ioat_chan_common *chan) 248 + { 249 + return readl(chan->reg_base + IOAT_CHANERR_OFFSET); 250 + } 251 + 252 + static inline void ioat_suspend(struct ioat_chan_common *chan) 253 + { 254 + u8 ver = chan->device->version; 255 + 256 + writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); 257 + } 258 + 259 + static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) 260 + { 261 + struct ioat_chan_common *chan = &ioat->base; 262 + 263 + writel(addr & 0x00000000FFFFFFFF, 264 + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 265 + writel(addr >> 32, 266 + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 267 + } 268 + 269 + static inline bool is_ioat_active(unsigned long status) 270 + { 271 + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); 272 + } 273 + 274 + static inline bool 
is_ioat_idle(unsigned long status) 275 + { 276 + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); 277 + } 278 + 279 + static inline bool is_ioat_halted(unsigned long status) 280 + { 281 + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); 282 + } 283 + 284 + static inline bool is_ioat_suspended(unsigned long status) 285 + { 286 + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); 287 + } 288 + 289 + /* channel was fatally programmed */ 290 + static inline bool is_ioat_bug(unsigned long err) 291 + { 292 + return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| 293 + IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| 294 + IOAT_CHANERR_LENGTH_ERR)); 295 + } 296 + 210 297 int __devinit ioat_probe(struct ioatdma_device *device); 211 298 int __devinit ioat_register(struct ioatdma_device *device); 212 299 int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); ··· 295 224 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 296 225 void ioat_init_channel(struct ioatdma_device *device, 297 226 struct ioat_chan_common *chan, int idx, 298 - work_func_t work_fn, void (*tasklet)(unsigned long), 299 - unsigned long tasklet_data); 227 + void (*timer_fn)(unsigned long), 228 + void (*tasklet)(unsigned long), 229 + unsigned long ioat); 300 230 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 301 231 size_t len, struct ioat_dma_descriptor *hw); 232 + bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 233 + unsigned long *phys_complete); 302 234 #endif /* IOATDMA_H */
+138 -183
drivers/dma/ioat/dma_v2.c
··· 49 49 void * __iomem reg_base = ioat->base.reg_base; 50 50 51 51 ioat->pending = 0; 52 - ioat->dmacount += ioat2_ring_pending(ioat); 52 + ioat->dmacount += ioat2_ring_pending(ioat);; 53 53 ioat->issued = ioat->head; 54 54 /* make descriptor updates globally visible before notifying channel */ 55 55 wmb(); ··· 92 92 93 93 static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) 94 94 { 95 - void __iomem *reg_base = ioat->base.reg_base; 96 95 struct ioat_ring_ent *desc; 97 96 struct ioat_dma_descriptor *hw; 98 97 int idx; ··· 117 118 hw->src_addr = 0; 118 119 hw->dst_addr = 0; 119 120 async_tx_ack(&desc->txd); 120 - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 121 - reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 122 - writel(((u64) desc->txd.phys) >> 32, 123 - reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 121 + ioat2_set_chainaddr(ioat, desc->txd.phys); 124 122 dump_desc_dbg(ioat, desc); 125 123 __ioat2_issue_pending(ioat); 126 124 } ··· 129 133 spin_unlock_bh(&ioat->ring_lock); 130 134 } 131 135 132 - static void ioat2_cleanup(struct ioat2_dma_chan *ioat); 133 - 134 - /** 135 - * ioat2_reset_part2 - reinit the channel after a reset 136 - */ 137 - static void ioat2_reset_part2(struct work_struct *work) 138 - { 139 - struct ioat_chan_common *chan; 140 - struct ioat2_dma_chan *ioat; 141 - 142 - chan = container_of(work, struct ioat_chan_common, work.work); 143 - ioat = container_of(chan, struct ioat2_dma_chan, base); 144 - 145 - /* ensure that ->tail points to the stalled descriptor 146 - * (ioat->pending is set to 2 at this point so no new 147 - * descriptors will be issued while we perform this cleanup) 148 - */ 149 - ioat2_cleanup(ioat); 150 - 151 - spin_lock_bh(&chan->cleanup_lock); 152 - spin_lock_bh(&ioat->ring_lock); 153 - 154 - /* set the tail to be re-issued */ 155 - ioat->issued = ioat->tail; 156 - ioat->dmacount = 0; 157 - 158 - dev_dbg(to_dev(&ioat->base), 159 - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", 160 - __func__, ioat->head, 
ioat->tail, ioat->issued, ioat->dmacount); 161 - 162 - if (ioat2_ring_pending(ioat)) { 163 - struct ioat_ring_ent *desc; 164 - 165 - desc = ioat2_get_ring_ent(ioat, ioat->tail); 166 - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 167 - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 168 - writel(((u64) desc->txd.phys) >> 32, 169 - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 170 - __ioat2_issue_pending(ioat); 171 - } else 172 - __ioat2_start_null_desc(ioat); 173 - 174 - spin_unlock_bh(&ioat->ring_lock); 175 - spin_unlock_bh(&chan->cleanup_lock); 176 - 177 - dev_info(to_dev(chan), 178 - "chan%d reset - %d descs waiting, %d total desc\n", 179 - chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); 180 - } 181 - 182 - /** 183 - * ioat2_reset_channel - restart a channel 184 - * @ioat: IOAT DMA channel handle 185 - */ 186 - static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) 187 - { 188 - u32 chansts, chanerr; 189 - struct ioat_chan_common *chan = &ioat->base; 190 - u16 active; 191 - 192 - spin_lock_bh(&ioat->ring_lock); 193 - active = ioat2_ring_active(ioat); 194 - spin_unlock_bh(&ioat->ring_lock); 195 - if (!active) 196 - return; 197 - 198 - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 199 - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; 200 - if (chanerr) { 201 - dev_err(to_dev(chan), 202 - "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", 203 - chan_num(chan), chansts, chanerr); 204 - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 205 - } 206 - 207 - spin_lock_bh(&ioat->ring_lock); 208 - ioat->pending = 2; 209 - writeb(IOAT_CHANCMD_RESET, 210 - chan->reg_base 211 - + IOAT_CHANCMD_OFFSET(chan->device->version)); 212 - spin_unlock_bh(&ioat->ring_lock); 213 - schedule_delayed_work(&chan->work, RESET_DELAY); 214 - } 215 - 216 - /** 217 - * ioat2_chan_watchdog - watch for stuck channels 218 - */ 219 - static void ioat2_chan_watchdog(struct work_struct *work) 220 - { 221 - struct ioatdma_device *device = 222 - 
container_of(work, struct ioatdma_device, work.work); 223 - struct ioat2_dma_chan *ioat; 224 - struct ioat_chan_common *chan; 225 - u16 active; 226 - int i; 227 - 228 - dev_dbg(&device->pdev->dev, "%s\n", __func__); 229 - 230 - for (i = 0; i < device->common.chancnt; i++) { 231 - chan = ioat_chan_by_index(device, i); 232 - ioat = container_of(chan, struct ioat2_dma_chan, base); 233 - 234 - /* 235 - * for version 2.0 if there are descriptors yet to be processed 236 - * and the last completed hasn't changed since the last watchdog 237 - * if they haven't hit the pending level 238 - * issue the pending to push them through 239 - * else 240 - * try resetting the channel 241 - */ 242 - spin_lock_bh(&ioat->ring_lock); 243 - active = ioat2_ring_active(ioat); 244 - spin_unlock_bh(&ioat->ring_lock); 245 - 246 - if (active && 247 - chan->last_completion && 248 - chan->last_completion == chan->watchdog_completion) { 249 - 250 - if (ioat->pending == 1) 251 - ioat2_issue_pending(&chan->common); 252 - else { 253 - ioat2_reset_channel(ioat); 254 - chan->watchdog_completion = 0; 255 - } 256 - } else { 257 - chan->last_compl_desc_addr_hw = 0; 258 - chan->watchdog_completion = chan->last_completion; 259 - } 260 - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; 261 - } 262 - schedule_delayed_work(&device->work, WATCHDOG_DELAY); 263 - } 264 - 265 - /** 266 - * ioat2_cleanup - clean finished descriptors (advance tail pointer) 267 - * @chan: ioat channel to be cleaned up 268 - */ 269 - static void ioat2_cleanup(struct ioat2_dma_chan *ioat) 136 + static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) 270 137 { 271 138 struct ioat_chan_common *chan = &ioat->base; 272 - unsigned long phys_complete; 139 + struct dma_async_tx_descriptor *tx; 273 140 struct ioat_ring_ent *desc; 274 141 bool seen_current = false; 275 142 u16 active; 276 143 int i; 277 - struct dma_async_tx_descriptor *tx; 278 - 279 - prefetch(chan->completion); 280 - 281 - 
spin_lock_bh(&chan->cleanup_lock); 282 - phys_complete = ioat_get_current_completion(chan); 283 - if (phys_complete == chan->last_completion) { 284 - spin_unlock_bh(&chan->cleanup_lock); 285 - /* 286 - * perhaps we're stuck so hard that the watchdog can't go off? 287 - * try to catch it after WATCHDOG_DELAY seconds 288 - */ 289 - if (chan->device->version < IOAT_VER_3_0) { 290 - unsigned long tmo; 291 - 292 - tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; 293 - if (time_after(jiffies, tmo)) { 294 - ioat2_chan_watchdog(&(chan->device->work.work)); 295 - chan->last_completion_time = jiffies; 296 - } 297 - } 298 - return; 299 - } 300 - chan->last_completion_time = jiffies; 301 - 302 - spin_lock_bh(&ioat->ring_lock); 303 144 304 145 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 305 146 __func__, ioat->head, ioat->tail, ioat->issued); ··· 162 329 } 163 330 ioat->tail += i; 164 331 BUG_ON(!seen_current); /* no active descs have written a completion? */ 165 - spin_unlock_bh(&ioat->ring_lock); 166 332 167 333 chan->last_completion = phys_complete; 334 + if (ioat->head == ioat->tail) { 335 + dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 336 + __func__); 337 + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 338 + } 339 + } 168 340 341 + /** 342 + * ioat2_cleanup - clean finished descriptors (advance tail pointer) 343 + * @chan: ioat channel to be cleaned up 344 + */ 345 + static void ioat2_cleanup(struct ioat2_dma_chan *ioat) 346 + { 347 + struct ioat_chan_common *chan = &ioat->base; 348 + unsigned long phys_complete; 349 + 350 + prefetch(chan->completion); 351 + 352 + if (!spin_trylock_bh(&chan->cleanup_lock)) 353 + return; 354 + 355 + if (!ioat_cleanup_preamble(chan, &phys_complete)) { 356 + spin_unlock_bh(&chan->cleanup_lock); 357 + return; 358 + } 359 + 360 + if (!spin_trylock_bh(&ioat->ring_lock)) { 361 + spin_unlock_bh(&chan->cleanup_lock); 362 + return; 363 + } 364 + 365 + __cleanup(ioat, phys_complete); 366 + 367 + 
spin_unlock_bh(&ioat->ring_lock); 169 368 spin_unlock_bh(&chan->cleanup_lock); 170 369 } 171 370 ··· 207 342 208 343 ioat2_cleanup(ioat); 209 344 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 345 + } 346 + 347 + static void __restart_chan(struct ioat2_dma_chan *ioat) 348 + { 349 + struct ioat_chan_common *chan = &ioat->base; 350 + 351 + /* set the tail to be re-issued */ 352 + ioat->issued = ioat->tail; 353 + ioat->dmacount = 0; 354 + set_bit(IOAT_COMPLETION_PENDING, &chan->state); 355 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 356 + 357 + dev_dbg(to_dev(chan), 358 + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", 359 + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); 360 + 361 + if (ioat2_ring_pending(ioat)) { 362 + struct ioat_ring_ent *desc; 363 + 364 + desc = ioat2_get_ring_ent(ioat, ioat->tail); 365 + ioat2_set_chainaddr(ioat, desc->txd.phys); 366 + __ioat2_issue_pending(ioat); 367 + } else 368 + __ioat2_start_null_desc(ioat); 369 + } 370 + 371 + static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) 372 + { 373 + struct ioat_chan_common *chan = &ioat->base; 374 + unsigned long phys_complete; 375 + u32 status; 376 + 377 + status = ioat_chansts(chan); 378 + if (is_ioat_active(status) || is_ioat_idle(status)) 379 + ioat_suspend(chan); 380 + while (is_ioat_active(status) || is_ioat_idle(status)) { 381 + status = ioat_chansts(chan); 382 + cpu_relax(); 383 + } 384 + 385 + if (ioat_cleanup_preamble(chan, &phys_complete)) 386 + __cleanup(ioat, phys_complete); 387 + 388 + __restart_chan(ioat); 389 + } 390 + 391 + static void ioat2_timer_event(unsigned long data) 392 + { 393 + struct ioat2_dma_chan *ioat = (void *) data; 394 + struct ioat_chan_common *chan = &ioat->base; 395 + 396 + spin_lock_bh(&chan->cleanup_lock); 397 + if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 398 + unsigned long phys_complete; 399 + u64 status; 400 + 401 + spin_lock_bh(&ioat->ring_lock); 402 + status = 
ioat_chansts(chan); 403 + 404 + /* when halted due to errors check for channel 405 + * programming errors before advancing the completion state 406 + */ 407 + if (is_ioat_halted(status)) { 408 + u32 chanerr; 409 + 410 + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 411 + BUG_ON(is_ioat_bug(chanerr)); 412 + } 413 + 414 + /* if we haven't made progress and we have already 415 + * acknowledged a pending completion once, then be more 416 + * forceful with a restart 417 + */ 418 + if (ioat_cleanup_preamble(chan, &phys_complete)) 419 + __cleanup(ioat, phys_complete); 420 + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 421 + ioat2_restart_channel(ioat); 422 + else { 423 + set_bit(IOAT_COMPLETION_ACK, &chan->state); 424 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 425 + } 426 + spin_unlock_bh(&ioat->ring_lock); 427 + } 428 + spin_unlock_bh(&chan->cleanup_lock); 210 429 } 211 430 212 431 /** ··· 330 381 break; 331 382 332 383 ioat_init_channel(device, &ioat->base, i, 333 - ioat2_reset_part2, 384 + ioat2_timer_event, 334 385 ioat2_cleanup_tasklet, 335 386 (unsigned long) ioat); 336 387 ioat->xfercap_log = xfercap_log; ··· 344 395 { 345 396 struct dma_chan *c = tx->chan; 346 397 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 398 + struct ioat_chan_common *chan = &ioat->base; 347 399 dma_cookie_t cookie = c->cookie; 348 400 349 401 cookie++; ··· 354 404 c->cookie = cookie; 355 405 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 356 406 407 + if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 408 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 357 409 ioat2_update_pending(ioat); 358 410 spin_unlock_bh(&ioat->ring_lock); 359 411 ··· 495 543 ioat->issued); 496 544 spin_unlock_bh(&ioat->ring_lock); 497 545 498 - /* do direct reclaim in the allocation failure case */ 499 - ioat2_cleanup(ioat); 500 - 546 + /* progress reclaim in the allocation failure case we 547 + * may be called under bh_disabled so we need to 
trigger 548 + * the timer event directly 549 + */ 550 + spin_lock_bh(&chan->cleanup_lock); 551 + if (jiffies > chan->timer.expires && 552 + timer_pending(&chan->timer)) { 553 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 554 + spin_unlock_bh(&chan->cleanup_lock); 555 + ioat2_timer_event((unsigned long) ioat); 556 + } else 557 + spin_unlock_bh(&chan->cleanup_lock); 501 558 return -ENOMEM; 502 559 } 503 560 ··· 585 624 return; 586 625 587 626 tasklet_disable(&chan->cleanup_task); 627 + del_timer_sync(&chan->timer); 588 628 ioat2_cleanup(ioat); 589 629 590 630 /* Delay 100ms after reset to allow internal DMA logic to quiesce ··· 625 663 chan->completion_dma = 0; 626 664 ioat->pending = 0; 627 665 ioat->dmacount = 0; 628 - chan->watchdog_completion = 0; 629 - chan->last_compl_desc_addr_hw = 0; 630 - chan->watchdog_tcp_cookie = 0; 631 - chan->watchdog_last_tcp_cookie = 0; 632 666 } 633 667 634 668 static enum dma_status ··· 673 715 return err; 674 716 if (dca) 675 717 device->dca = ioat2_dca_init(pdev, device->reg_base); 676 - 677 - INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); 678 - schedule_delayed_work(&device->work, WATCHDOG_DELAY); 679 718 680 719 return err; 681 720 }
+10
drivers/dma/ioat/dma_v2.h
··· 127 127 return ioat->ring[idx & ioat2_ring_mask(ioat)]; 128 128 } 129 129 130 + static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) 131 + { 132 + struct ioat_chan_common *chan = &ioat->base; 133 + 134 + writel(addr & 0x00000000FFFFFFFF, 135 + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 136 + writel(addr >> 32, 137 + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 138 + } 139 + 130 140 int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); 131 141 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); 132 142 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+11 -11
drivers/dma/ioat/registers.h
/* Channel Status register: transfer-status field (low 3 bits) and its
 * possible values.  Renamed from the verbose
 * IOAT_CHANSTS_DMA_TRANSFER_STATUS_* forms.
 */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	(~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR			0x10ULL
#define IOAT_CHANSTS_UNAFFILIATED_ERR		0x8ULL
#define IOAT_CHANSTS_STATUS			0x7ULL
#define IOAT_CHANSTS_ACTIVE			0x0
#define IOAT_CHANSTS_DONE			0x1
#define IOAT_CHANSTS_SUSPENDED			0x2
#define IOAT_CHANSTS_HALTED			0x3

#define IOAT_CDAR_OFFSET_HIGH			0x24

/* Channel Error register (32-bit): one bit per error condition.
 * Several names shortened (e.g. IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR
 * -> IOAT_CHANERR_SRC_ADDR_ERR); bit values unchanged.
 */
#define IOAT_CHANERR_OFFSET			0x28	/* 32-bit Channel Error Register */
#define IOAT_CHANERR_SRC_ADDR_ERR		0x0001
#define IOAT_CHANERR_DEST_ADDR_ERR		0x0002
#define IOAT_CHANERR_NEXT_ADDR_ERR		0x0004
#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR	0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR	0x0010
#define IOAT_CHANERR_CHANCMD_ERR		0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0080
#define IOAT_CHANERR_READ_DATA_ERR		0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR		0x0200
#define IOAT_CHANERR_CONTROL_ERR		0x0400
#define IOAT_CHANERR_LENGTH_ERR			0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR	0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR	0x2000
#define IOAT_CHANERR_SOFT_ERR			0x4000
+8 -8
drivers/idle/i7300_idle.c
··· 126 126 udelay(10); 127 127 128 128 sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & 129 - IOAT_CHANSTS_DMA_TRANSFER_STATUS; 129 + IOAT_CHANSTS_STATUS; 130 130 131 - if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) 131 + if (sts != IOAT_CHANSTS_ACTIVE) 132 132 break; 133 133 134 134 } ··· 160 160 udelay(1000); 161 161 162 162 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & 163 - IOAT_CHANSTS_DMA_TRANSFER_STATUS; 163 + IOAT_CHANSTS_STATUS; 164 164 165 - if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) { 165 + if (chan_sts != IOAT_CHANSTS_DONE) { 166 166 /* Not complete, reset the channel */ 167 167 writeb(IOAT_CHANCMD_RESET, 168 168 ioat_chanbase + IOAT1_CHANCMD_OFFSET); ··· 288 288 ioat_chanbase + IOAT1_CHANCMD_OFFSET); 289 289 290 290 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & 291 - IOAT_CHANSTS_DMA_TRANSFER_STATUS; 291 + IOAT_CHANSTS_STATUS; 292 292 293 - if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { 293 + if (chan_sts != IOAT_CHANSTS_ACTIVE) { 294 294 writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); 295 295 break; 296 296 } ··· 298 298 } 299 299 300 300 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & 301 - IOAT_CHANSTS_DMA_TRANSFER_STATUS; 301 + IOAT_CHANSTS_STATUS; 302 302 303 303 /* 304 304 * We tried to reset multiple times. If IO A/T channel is still active 305 305 * flag an error and return without cleanup. Memory leak is better 306 306 * than random corruption in that extreme error situation. 307 307 */ 308 - if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { 308 + if (chan_sts == IOAT_CHANSTS_ACTIVE) { 309 309 printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." 310 310 " Not freeing resources\n"); 311 311 return;