Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.32
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */

static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20; /* ~50 ms: HZ/20, rounded up */
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else {
		del_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
		ipath_hol_down(dd);
	} else {
		ipath_dbg("vl15 watchdog timeout - "
			  "condition already cleared\n");
	}
}

static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
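/*
 * Editorial note on the length extraction above (inferred from
 * make_sdma_desc() below, not an original comment): desc[0] carries
 * SDmaDwordCount[10:0] in bits 26:16, so the byte length is
 *
 *	bytes = ((desc[0] >> 16) & 0x7ff) << 2
 *	      = (desc[0] >> 14) & (0x7ffULL << 2)
 *
 * i.e. the shift by 14 extracts the dword count and converts dwords
 * to bytes in a single step.
 */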
/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}
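/*
 * Illustrative helper, not part of the original file: the real
 * ipath_sdma_descq_freecnt() used by ipath_sdma_verbs_send() below is
 * defined in ipath_kernel.h.  This sketch assumes the added/removed
 * counters are 16-bit and free-running, and that one slot is kept
 * unused so a full ring is distinguishable from an empty one (a common
 * ring-buffer convention).
 */
static inline unsigned sdma_descq_freecnt_sketch(struct ipath_devdata *dd)
{
	/* descriptors handed to the chip but not yet reclaimed */
	u16 in_use = dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed;

	return dd->ipath_sdma_descq_cnt - in_use - 1;
}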
static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}
static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		/* time_before() is wraparound-safe, unlike a plain "<" */
		if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}

	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		u64 hwstatus;
		int notify = 0;

		hwstatus = ipath_read_kreg64(dd,
					     dd->ipath_kregs->kr_senddmastatus);

		if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
				 IPATH_SDMA_STATUS_ABORT_IN_PROG |
				 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
		    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				   "status after SDMA reset, continuing\n");
			dump_sdma_state(dd);
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
			notify = 1;
		}
		if (notify)
			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
				 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/*
		 * Don't restart sdma here (with the exception
		 * below).  Wait until link is up to ACTIVE.  VL15 MADs
		 * used to bring the link up use PIO, and multiple link
		 * transitions otherwise cause the sdma engine to be
		 * stopped and started multiple times.
		 * The disable is done here, including the shadow,
		 * so the state is kept consistent.
		 * See ipath_restart_sdma() for the actual starting
		 * of sdma.
		 */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		/*
		 * Not everything that takes SDMA offline is a link
		 * status change.  If the link was up, restart SDMA.
		 */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			ipath_restart_sdma(dd);

		goto done;
	}

resched:
	/*
	 * for now, keep spinning
	 * JAG - this is bad to just have default be a loop without
	 * state change
	 */
	if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
		ipath_dbg("looping with status 0x%08lx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
	return;
}
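/*
 * Abort-recovery flow, summarized (inferred from the code above, not
 * an original comment):
 *
 *	ABORT_NONE     -> (abort requested elsewhere)           -> ABORT_DISARMED
 *	ABORT_DISARMED -> (SDMADISABLED intr, or timeout above) -> ABORT_ABORTED
 *	ABORT_ABORTED  -> drain the active list, reset the ring indices,
 *			  then re-enable only once the link is ACTIVE
 *			  again, via ipath_restart_sdma()
 */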
/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}
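/*
 * Sizing note (illustrative; it assumes struct ipath_sdma_desc is the
 * two-qword, 16-byte descriptor manipulated via .qw[0]/.qw[1] elsewhere
 * in this file): with 4 KB pages,
 *
 *	ipath_sdma_descq_cnt = PAGE_SIZE / sizeof(struct ipath_sdma_desc)
 *			     = 4096 / 16 = 256
 *
 * which matches the "256 entries per 4KB page" comment on SDMA_DESCQ_SZ.
 */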
int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		goto done;
	}

	/*
	 * Set initial status as if we had been up, then gone down.
	 * This lets initial start on transition to ACTIVE be the
	 * same as restart after link flap.
	 */
	dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1ULL << 18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible
	 */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
	ipath_chg_pioavailkernel(dd, i, n - i, 0);
	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;
		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);
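	/*
	 * Illustrative example (the buffer numbers are assumed, not
	 * from the source): if piobufs 100..102 were the ones reserved
	 * above, the loop would set bits 36..38 of senddmabufmask[1],
	 * since 100 = 1 * 64 + 36; in general word = i / 64 and
	 * bit = i % 64 across the three 64-bit mask registers.
	 */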
	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/*
	 * No use to turn on SDMA here, as link is probably not ACTIVE.
	 * Just mark it RUNNING and enable the interrupt, and let the
	 * ipath_restart_sdma() on link transition to ACTIVE actually
	 * enable it.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}

void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	/* turn off sdma */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
				  sdma_descq, sdma_descq_phys);
}
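/*
 * Design note on teardown_sdma() (editorial, not an original comment):
 * the DMA regions are detached from dd while ipath_sdma_lock is held,
 * but dma_free_coherent() is called only after the lock is dropped,
 * since freeing coherent memory with a spinlock held and interrupts
 * off is not safe on all configurations.  Snapshotting the
 * pointer/handle pairs first keeps the critical section short.
 */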
/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
	unsigned long flags;
	int needed = 1;

	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		goto bail;

	/*
	 * First, make sure we should, which is to say,
	 * check that we are "RUNNING" (not in teardown)
	 * and not "SHUTDOWN"
	 */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
	    || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		needed = 0;
	else {
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
			  dd->ipath_sdma_status);
		goto bail;
	}
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	/*
	 * First clear, just to be safe. Enable is only done
	 * in chip on 0->1 transition
	 */
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* notify upper layers */
	ipath_ib_piobufavail(dd->verbs_dev);

bail:
	return;
}

static inline void make_sdma_desc(struct ipath_devdata *dd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
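/*
 * Worked example (the input values are assumed, purely for
 * illustration): with addr = 0x12345678, dwlen = 8, dwoffset = 0 and
 * ipath_sdma_generation == 1, make_sdma_desc() yields
 *
 *	sdmadesc[1] = 0x0;                   SDmaPhyAddr[47:32]
 *	sdmadesc[0] = (0x12345678ULL << 32)  SDmaPhyAddr[31:0]
 *		    | (1ULL << 30)           SDmaGeneration = 1
 *		    | (8ULL << 16)           SDmaDwordCount = 8 dwords
 *		    | 0                      SDmaBufOffset = 0
 *		    = 0x1234567840080000ULL;
 *
 * unmap_desc() above inverts exactly this packing when it tears a
 * descriptor back down into an (addr, len) pair.
 */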
/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
			  struct ipath_sge_state *ss, u32 dwords,
			  struct ipath_verbs_txreq *tx)
{
	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			  tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, addr))
		goto ioerr;

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pcidev->dev, addr))
			goto unmap;
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
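	/*
	 * Editorial note (inferred from the code, not an original
	 * comment): the wmb() orders the descriptor stores above
	 * before the MMIO tail write below, so the chip can never
	 * fetch a descriptor the CPU has not finished writing.
	 */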
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);
	goto unlock;

unmap:
	while (tail != dd->ipath_sdma_descq_tail) {
		if (!tail)
			tail = dd->ipath_sdma_descq_cnt - 1;
		else
			tail--;
		unmap_desc(dd, tail);
	}
ioerr:
	ret = -EIO;
unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}
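/*
 * Hedged usage sketch (the real caller lives in ipath_verbs.c and is
 * not shown here; queue_tx_for_piobufavail() is a hypothetical name):
 * -EBUSY and -ENOBUFS both mean "try again later", and the natural
 * retry point is the piobufavail callback that sdma_notify_taskbody()
 * raises once descriptors have been reclaimed:
 *
 *	ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
 *	if (ret == -EBUSY || ret == -ENOBUFS)
 *		queue_tx_for_piobufavail(tx);
 */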