Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.4 1958 lines 50 kB view raw
1/* 2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips 3 * 4 * Author: Martyn Welch <martyn.welch@ge.com> 5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 6 * 7 * Based on work by Tom Armistead and Ajit Prem 8 * Copyright 2004 Motorola Inc. 9 * 10 * Derived from ca91c042.c by Michael Wyrick 11 * 12 * This program is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU General Public License as published by the 14 * Free Software Foundation; either version 2 of the License, or (at your 15 * option) any later version. 16 */ 17 18#include <linux/module.h> 19#include <linux/mm.h> 20#include <linux/types.h> 21#include <linux/errno.h> 22#include <linux/pci.h> 23#include <linux/dma-mapping.h> 24#include <linux/poll.h> 25#include <linux/interrupt.h> 26#include <linux/spinlock.h> 27#include <linux/sched.h> 28#include <linux/slab.h> 29#include <linux/time.h> 30#include <linux/io.h> 31#include <linux/uaccess.h> 32#include <linux/vme.h> 33 34#include "../vme_bridge.h" 35#include "vme_ca91cx42.h" 36 37static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *); 38static void ca91cx42_remove(struct pci_dev *); 39 40/* Module parameters */ 41static int geoid; 42 43static const char driver_name[] = "vme_ca91cx42"; 44 45static const struct pci_device_id ca91cx42_ids[] = { 46 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) }, 47 { }, 48}; 49 50static struct pci_driver ca91cx42_driver = { 51 .name = driver_name, 52 .id_table = ca91cx42_ids, 53 .probe = ca91cx42_probe, 54 .remove = ca91cx42_remove, 55}; 56 57static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge) 58{ 59 wake_up(&bridge->dma_queue); 60 61 return CA91CX42_LINT_DMA; 62} 63 64static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat) 65{ 66 int i; 67 u32 serviced = 0; 68 69 for (i = 0; i < 4; i++) { 70 if (stat & CA91CX42_LINT_LM[i]) { 71 /* We only enable interrupts if the callback is set */ 72 
bridge->lm_callback[i](i); 73 serviced |= CA91CX42_LINT_LM[i]; 74 } 75 } 76 77 return serviced; 78} 79 80/* XXX This needs to be split into 4 queues */ 81static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask) 82{ 83 wake_up(&bridge->mbox_queue); 84 85 return CA91CX42_LINT_MBOX; 86} 87 88static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge) 89{ 90 wake_up(&bridge->iack_queue); 91 92 return CA91CX42_LINT_SW_IACK; 93} 94 95static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge) 96{ 97 int val; 98 struct ca91cx42_driver *bridge; 99 100 bridge = ca91cx42_bridge->driver_priv; 101 102 val = ioread32(bridge->base + DGCS); 103 104 if (!(val & 0x00000800)) { 105 dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA " 106 "Read Error DGCS=%08X\n", val); 107 } 108 109 return CA91CX42_LINT_VERR; 110} 111 112static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge) 113{ 114 int val; 115 struct ca91cx42_driver *bridge; 116 117 bridge = ca91cx42_bridge->driver_priv; 118 119 val = ioread32(bridge->base + DGCS); 120 121 if (!(val & 0x00000800)) 122 dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA " 123 "Read Error DGCS=%08X\n", val); 124 125 return CA91CX42_LINT_LERR; 126} 127 128 129static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge, 130 int stat) 131{ 132 int vec, i, serviced = 0; 133 struct ca91cx42_driver *bridge; 134 135 bridge = ca91cx42_bridge->driver_priv; 136 137 138 for (i = 7; i > 0; i--) { 139 if (stat & (1 << i)) { 140 vec = ioread32(bridge->base + 141 CA91CX42_V_STATID[i]) & 0xff; 142 143 vme_irq_handler(ca91cx42_bridge, i, vec); 144 145 serviced |= (1 << i); 146 } 147 } 148 149 return serviced; 150} 151 152static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr) 153{ 154 u32 stat, enable, serviced = 0; 155 struct vme_bridge *ca91cx42_bridge; 156 struct ca91cx42_driver *bridge; 157 158 ca91cx42_bridge = ptr; 159 160 bridge = ca91cx42_bridge->driver_priv; 
161 162 enable = ioread32(bridge->base + LINT_EN); 163 stat = ioread32(bridge->base + LINT_STAT); 164 165 /* Only look at unmasked interrupts */ 166 stat &= enable; 167 168 if (unlikely(!stat)) 169 return IRQ_NONE; 170 171 if (stat & CA91CX42_LINT_DMA) 172 serviced |= ca91cx42_DMA_irqhandler(bridge); 173 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 | 174 CA91CX42_LINT_LM3)) 175 serviced |= ca91cx42_LM_irqhandler(bridge, stat); 176 if (stat & CA91CX42_LINT_MBOX) 177 serviced |= ca91cx42_MB_irqhandler(bridge, stat); 178 if (stat & CA91CX42_LINT_SW_IACK) 179 serviced |= ca91cx42_IACK_irqhandler(bridge); 180 if (stat & CA91CX42_LINT_VERR) 181 serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge); 182 if (stat & CA91CX42_LINT_LERR) 183 serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge); 184 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 | 185 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 | 186 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 | 187 CA91CX42_LINT_VIRQ7)) 188 serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat); 189 190 /* Clear serviced interrupts */ 191 iowrite32(serviced, bridge->base + LINT_STAT); 192 193 return IRQ_HANDLED; 194} 195 196static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge) 197{ 198 int result, tmp; 199 struct pci_dev *pdev; 200 struct ca91cx42_driver *bridge; 201 202 bridge = ca91cx42_bridge->driver_priv; 203 204 /* Need pdev */ 205 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); 206 207 INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); 208 209 mutex_init(&ca91cx42_bridge->irq_mtx); 210 211 /* Disable interrupts from PCI to VME */ 212 iowrite32(0, bridge->base + VINT_EN); 213 214 /* Disable PCI interrupts */ 215 iowrite32(0, bridge->base + LINT_EN); 216 /* Clear Any Pending PCI Interrupts */ 217 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT); 218 219 result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED, 220 driver_name, ca91cx42_bridge); 221 if (result) { 222 
dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n", 223 pdev->irq); 224 return result; 225 } 226 227 /* Ensure all interrupts are mapped to PCI Interrupt 0 */ 228 iowrite32(0, bridge->base + LINT_MAP0); 229 iowrite32(0, bridge->base + LINT_MAP1); 230 iowrite32(0, bridge->base + LINT_MAP2); 231 232 /* Enable DMA, mailbox & LM Interrupts */ 233 tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 | 234 CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK | 235 CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA; 236 237 iowrite32(tmp, bridge->base + LINT_EN); 238 239 return 0; 240} 241 242static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge, 243 struct pci_dev *pdev) 244{ 245 struct vme_bridge *ca91cx42_bridge; 246 247 /* Disable interrupts from PCI to VME */ 248 iowrite32(0, bridge->base + VINT_EN); 249 250 /* Disable PCI interrupts */ 251 iowrite32(0, bridge->base + LINT_EN); 252 /* Clear Any Pending PCI Interrupts */ 253 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT); 254 255 ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge, 256 driver_priv); 257 free_irq(pdev->irq, ca91cx42_bridge); 258} 259 260static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level) 261{ 262 u32 tmp; 263 264 tmp = ioread32(bridge->base + LINT_STAT); 265 266 if (tmp & (1 << level)) 267 return 0; 268 else 269 return 1; 270} 271 272/* 273 * Set up an VME interrupt 274 */ 275static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, 276 int state, int sync) 277 278{ 279 struct pci_dev *pdev; 280 u32 tmp; 281 struct ca91cx42_driver *bridge; 282 283 bridge = ca91cx42_bridge->driver_priv; 284 285 /* Enable IRQ level */ 286 tmp = ioread32(bridge->base + LINT_EN); 287 288 if (state == 0) 289 tmp &= ~CA91CX42_LINT_VIRQ[level]; 290 else 291 tmp |= CA91CX42_LINT_VIRQ[level]; 292 293 iowrite32(tmp, bridge->base + LINT_EN); 294 295 if ((state == 0) && (sync != 0)) { 296 pdev = container_of(ca91cx42_bridge->parent, struct 
pci_dev, 297 dev); 298 299 synchronize_irq(pdev->irq); 300 } 301} 302 303static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level, 304 int statid) 305{ 306 u32 tmp; 307 struct ca91cx42_driver *bridge; 308 309 bridge = ca91cx42_bridge->driver_priv; 310 311 /* Universe can only generate even vectors */ 312 if (statid & 1) 313 return -EINVAL; 314 315 mutex_lock(&bridge->vme_int); 316 317 tmp = ioread32(bridge->base + VINT_EN); 318 319 /* Set Status/ID */ 320 iowrite32(statid << 24, bridge->base + STATID); 321 322 /* Assert VMEbus IRQ */ 323 tmp = tmp | (1 << (level + 24)); 324 iowrite32(tmp, bridge->base + VINT_EN); 325 326 /* Wait for IACK */ 327 wait_event_interruptible(bridge->iack_queue, 328 ca91cx42_iack_received(bridge, level)); 329 330 /* Return interrupt to low state */ 331 tmp = ioread32(bridge->base + VINT_EN); 332 tmp = tmp & ~(1 << (level + 24)); 333 iowrite32(tmp, bridge->base + VINT_EN); 334 335 mutex_unlock(&bridge->vme_int); 336 337 return 0; 338} 339 340static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled, 341 unsigned long long vme_base, unsigned long long size, 342 dma_addr_t pci_base, u32 aspace, u32 cycle) 343{ 344 unsigned int i, addr = 0, granularity; 345 unsigned int temp_ctl = 0; 346 unsigned int vme_bound, pci_offset; 347 struct vme_bridge *ca91cx42_bridge; 348 struct ca91cx42_driver *bridge; 349 350 ca91cx42_bridge = image->parent; 351 352 bridge = ca91cx42_bridge->driver_priv; 353 354 i = image->number; 355 356 switch (aspace) { 357 case VME_A16: 358 addr |= CA91CX42_VSI_CTL_VAS_A16; 359 break; 360 case VME_A24: 361 addr |= CA91CX42_VSI_CTL_VAS_A24; 362 break; 363 case VME_A32: 364 addr |= CA91CX42_VSI_CTL_VAS_A32; 365 break; 366 case VME_USER1: 367 addr |= CA91CX42_VSI_CTL_VAS_USER1; 368 break; 369 case VME_USER2: 370 addr |= CA91CX42_VSI_CTL_VAS_USER2; 371 break; 372 case VME_A64: 373 case VME_CRCSR: 374 case VME_USER3: 375 case VME_USER4: 376 default: 377 dev_err(ca91cx42_bridge->parent, 
"Invalid address space\n"); 378 return -EINVAL; 379 break; 380 } 381 382 /* 383 * Bound address is a valid address for the window, adjust 384 * accordingly 385 */ 386 vme_bound = vme_base + size; 387 pci_offset = pci_base - vme_base; 388 389 if ((i == 0) || (i == 4)) 390 granularity = 0x1000; 391 else 392 granularity = 0x10000; 393 394 if (vme_base & (granularity - 1)) { 395 dev_err(ca91cx42_bridge->parent, "Invalid VME base " 396 "alignment\n"); 397 return -EINVAL; 398 } 399 if (vme_bound & (granularity - 1)) { 400 dev_err(ca91cx42_bridge->parent, "Invalid VME bound " 401 "alignment\n"); 402 return -EINVAL; 403 } 404 if (pci_offset & (granularity - 1)) { 405 dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset " 406 "alignment\n"); 407 return -EINVAL; 408 } 409 410 /* Disable while we are mucking around */ 411 temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]); 412 temp_ctl &= ~CA91CX42_VSI_CTL_EN; 413 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]); 414 415 /* Setup mapping */ 416 iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]); 417 iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]); 418 iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]); 419 420 /* Setup address space */ 421 temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M; 422 temp_ctl |= addr; 423 424 /* Setup cycle types */ 425 temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M); 426 if (cycle & VME_SUPER) 427 temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR; 428 if (cycle & VME_USER) 429 temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV; 430 if (cycle & VME_PROG) 431 temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM; 432 if (cycle & VME_DATA) 433 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA; 434 435 /* Write ctl reg without enable */ 436 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]); 437 438 if (enabled) 439 temp_ctl |= CA91CX42_VSI_CTL_EN; 440 441 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]); 442 443 return 0; 444} 445 446static int ca91cx42_slave_get(struct vme_slave_resource *image, int 
*enabled, 447 unsigned long long *vme_base, unsigned long long *size, 448 dma_addr_t *pci_base, u32 *aspace, u32 *cycle) 449{ 450 unsigned int i, granularity = 0, ctl = 0; 451 unsigned long long vme_bound, pci_offset; 452 struct ca91cx42_driver *bridge; 453 454 bridge = image->parent->driver_priv; 455 456 i = image->number; 457 458 if ((i == 0) || (i == 4)) 459 granularity = 0x1000; 460 else 461 granularity = 0x10000; 462 463 /* Read Registers */ 464 ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]); 465 466 *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]); 467 vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); 468 pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); 469 470 *pci_base = (dma_addr_t)vme_base + pci_offset; 471 *size = (unsigned long long)((vme_bound - *vme_base) + granularity); 472 473 *enabled = 0; 474 *aspace = 0; 475 *cycle = 0; 476 477 if (ctl & CA91CX42_VSI_CTL_EN) 478 *enabled = 1; 479 480 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16) 481 *aspace = VME_A16; 482 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24) 483 *aspace = VME_A24; 484 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32) 485 *aspace = VME_A32; 486 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1) 487 *aspace = VME_USER1; 488 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2) 489 *aspace = VME_USER2; 490 491 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR) 492 *cycle |= VME_SUPER; 493 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV) 494 *cycle |= VME_USER; 495 if (ctl & CA91CX42_VSI_CTL_PGM_PGM) 496 *cycle |= VME_PROG; 497 if (ctl & CA91CX42_VSI_CTL_PGM_DATA) 498 *cycle |= VME_DATA; 499 500 return 0; 501} 502 503/* 504 * Allocate and map PCI Resource 505 */ 506static int ca91cx42_alloc_resource(struct vme_master_resource *image, 507 unsigned long long size) 508{ 509 unsigned long long existing_size; 510 int retval = 0; 511 struct pci_dev *pdev; 512 struct vme_bridge *ca91cx42_bridge; 513 514 
ca91cx42_bridge = image->parent; 515 516 /* Find pci_dev container of dev */ 517 if (ca91cx42_bridge->parent == NULL) { 518 dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n"); 519 return -EINVAL; 520 } 521 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); 522 523 existing_size = (unsigned long long)(image->bus_resource.end - 524 image->bus_resource.start); 525 526 /* If the existing size is OK, return */ 527 if (existing_size == (size - 1)) 528 return 0; 529 530 if (existing_size != 0) { 531 iounmap(image->kern_base); 532 image->kern_base = NULL; 533 kfree(image->bus_resource.name); 534 release_resource(&image->bus_resource); 535 memset(&image->bus_resource, 0, sizeof(struct resource)); 536 } 537 538 if (image->bus_resource.name == NULL) { 539 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC); 540 if (image->bus_resource.name == NULL) { 541 dev_err(ca91cx42_bridge->parent, "Unable to allocate " 542 "memory for resource name\n"); 543 retval = -ENOMEM; 544 goto err_name; 545 } 546 } 547 548 sprintf((char *)image->bus_resource.name, "%s.%d", 549 ca91cx42_bridge->name, image->number); 550 551 image->bus_resource.start = 0; 552 image->bus_resource.end = (unsigned long)size; 553 image->bus_resource.flags = IORESOURCE_MEM; 554 555 retval = pci_bus_alloc_resource(pdev->bus, 556 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM, 557 0, NULL, NULL); 558 if (retval) { 559 dev_err(ca91cx42_bridge->parent, "Failed to allocate mem " 560 "resource for window %d size 0x%lx start 0x%lx\n", 561 image->number, (unsigned long)size, 562 (unsigned long)image->bus_resource.start); 563 goto err_resource; 564 } 565 566 image->kern_base = ioremap_nocache( 567 image->bus_resource.start, size); 568 if (image->kern_base == NULL) { 569 dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n"); 570 retval = -ENOMEM; 571 goto err_remap; 572 } 573 574 return 0; 575 576err_remap: 577 release_resource(&image->bus_resource); 578err_resource: 579 
kfree(image->bus_resource.name); 580 memset(&image->bus_resource, 0, sizeof(struct resource)); 581err_name: 582 return retval; 583} 584 585/* 586 * Free and unmap PCI Resource 587 */ 588static void ca91cx42_free_resource(struct vme_master_resource *image) 589{ 590 iounmap(image->kern_base); 591 image->kern_base = NULL; 592 release_resource(&image->bus_resource); 593 kfree(image->bus_resource.name); 594 memset(&image->bus_resource, 0, sizeof(struct resource)); 595} 596 597 598static int ca91cx42_master_set(struct vme_master_resource *image, int enabled, 599 unsigned long long vme_base, unsigned long long size, u32 aspace, 600 u32 cycle, u32 dwidth) 601{ 602 int retval = 0; 603 unsigned int i, granularity = 0; 604 unsigned int temp_ctl = 0; 605 unsigned long long pci_bound, vme_offset, pci_base; 606 struct vme_bridge *ca91cx42_bridge; 607 struct ca91cx42_driver *bridge; 608 609 ca91cx42_bridge = image->parent; 610 611 bridge = ca91cx42_bridge->driver_priv; 612 613 i = image->number; 614 615 if ((i == 0) || (i == 4)) 616 granularity = 0x1000; 617 else 618 granularity = 0x10000; 619 620 /* Verify input data */ 621 if (vme_base & (granularity - 1)) { 622 dev_err(ca91cx42_bridge->parent, "Invalid VME Window " 623 "alignment\n"); 624 retval = -EINVAL; 625 goto err_window; 626 } 627 if (size & (granularity - 1)) { 628 dev_err(ca91cx42_bridge->parent, "Invalid VME Window " 629 "alignment\n"); 630 retval = -EINVAL; 631 goto err_window; 632 } 633 634 spin_lock(&image->lock); 635 636 /* 637 * Let's allocate the resource here rather than further up the stack as 638 * it avoids pushing loads of bus dependent stuff up the stack 639 */ 640 retval = ca91cx42_alloc_resource(image, size); 641 if (retval) { 642 spin_unlock(&image->lock); 643 dev_err(ca91cx42_bridge->parent, "Unable to allocate memory " 644 "for resource name\n"); 645 retval = -ENOMEM; 646 goto err_res; 647 } 648 649 pci_base = (unsigned long long)image->bus_resource.start; 650 651 /* 652 * Bound address is a valid 
address for the window, adjust 653 * according to window granularity. 654 */ 655 pci_bound = pci_base + size; 656 vme_offset = vme_base - pci_base; 657 658 /* Disable while we are mucking around */ 659 temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]); 660 temp_ctl &= ~CA91CX42_LSI_CTL_EN; 661 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]); 662 663 /* Setup cycle types */ 664 temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M; 665 if (cycle & VME_BLT) 666 temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT; 667 if (cycle & VME_MBLT) 668 temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT; 669 670 /* Setup data width */ 671 temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M; 672 switch (dwidth) { 673 case VME_D8: 674 temp_ctl |= CA91CX42_LSI_CTL_VDW_D8; 675 break; 676 case VME_D16: 677 temp_ctl |= CA91CX42_LSI_CTL_VDW_D16; 678 break; 679 case VME_D32: 680 temp_ctl |= CA91CX42_LSI_CTL_VDW_D32; 681 break; 682 case VME_D64: 683 temp_ctl |= CA91CX42_LSI_CTL_VDW_D64; 684 break; 685 default: 686 spin_unlock(&image->lock); 687 dev_err(ca91cx42_bridge->parent, "Invalid data width\n"); 688 retval = -EINVAL; 689 goto err_dwidth; 690 break; 691 } 692 693 /* Setup address space */ 694 temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M; 695 switch (aspace) { 696 case VME_A16: 697 temp_ctl |= CA91CX42_LSI_CTL_VAS_A16; 698 break; 699 case VME_A24: 700 temp_ctl |= CA91CX42_LSI_CTL_VAS_A24; 701 break; 702 case VME_A32: 703 temp_ctl |= CA91CX42_LSI_CTL_VAS_A32; 704 break; 705 case VME_CRCSR: 706 temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR; 707 break; 708 case VME_USER1: 709 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1; 710 break; 711 case VME_USER2: 712 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2; 713 break; 714 case VME_A64: 715 case VME_USER3: 716 case VME_USER4: 717 default: 718 spin_unlock(&image->lock); 719 dev_err(ca91cx42_bridge->parent, "Invalid address space\n"); 720 retval = -EINVAL; 721 goto err_aspace; 722 break; 723 } 724 725 temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M); 726 if (cycle & VME_SUPER) 727 temp_ctl |= 
CA91CX42_LSI_CTL_SUPER_SUPR; 728 if (cycle & VME_PROG) 729 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM; 730 731 /* Setup mapping */ 732 iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]); 733 iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]); 734 iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]); 735 736 /* Write ctl reg without enable */ 737 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]); 738 739 if (enabled) 740 temp_ctl |= CA91CX42_LSI_CTL_EN; 741 742 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]); 743 744 spin_unlock(&image->lock); 745 return 0; 746 747err_aspace: 748err_dwidth: 749 ca91cx42_free_resource(image); 750err_res: 751err_window: 752 return retval; 753} 754 755static int __ca91cx42_master_get(struct vme_master_resource *image, 756 int *enabled, unsigned long long *vme_base, unsigned long long *size, 757 u32 *aspace, u32 *cycle, u32 *dwidth) 758{ 759 unsigned int i, ctl; 760 unsigned long long pci_base, pci_bound, vme_offset; 761 struct ca91cx42_driver *bridge; 762 763 bridge = image->parent->driver_priv; 764 765 i = image->number; 766 767 ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]); 768 769 pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]); 770 vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]); 771 pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]); 772 773 *vme_base = pci_base + vme_offset; 774 *size = (unsigned long long)(pci_bound - pci_base); 775 776 *enabled = 0; 777 *aspace = 0; 778 *cycle = 0; 779 *dwidth = 0; 780 781 if (ctl & CA91CX42_LSI_CTL_EN) 782 *enabled = 1; 783 784 /* Setup address space */ 785 switch (ctl & CA91CX42_LSI_CTL_VAS_M) { 786 case CA91CX42_LSI_CTL_VAS_A16: 787 *aspace = VME_A16; 788 break; 789 case CA91CX42_LSI_CTL_VAS_A24: 790 *aspace = VME_A24; 791 break; 792 case CA91CX42_LSI_CTL_VAS_A32: 793 *aspace = VME_A32; 794 break; 795 case CA91CX42_LSI_CTL_VAS_CRCSR: 796 *aspace = VME_CRCSR; 797 break; 798 case CA91CX42_LSI_CTL_VAS_USER1: 799 *aspace = VME_USER1; 800 break; 801 
case CA91CX42_LSI_CTL_VAS_USER2: 802 *aspace = VME_USER2; 803 break; 804 } 805 806 /* XXX Not sure howto check for MBLT */ 807 /* Setup cycle types */ 808 if (ctl & CA91CX42_LSI_CTL_VCT_BLT) 809 *cycle |= VME_BLT; 810 else 811 *cycle |= VME_SCT; 812 813 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR) 814 *cycle |= VME_SUPER; 815 else 816 *cycle |= VME_USER; 817 818 if (ctl & CA91CX42_LSI_CTL_PGM_PGM) 819 *cycle = VME_PROG; 820 else 821 *cycle = VME_DATA; 822 823 /* Setup data width */ 824 switch (ctl & CA91CX42_LSI_CTL_VDW_M) { 825 case CA91CX42_LSI_CTL_VDW_D8: 826 *dwidth = VME_D8; 827 break; 828 case CA91CX42_LSI_CTL_VDW_D16: 829 *dwidth = VME_D16; 830 break; 831 case CA91CX42_LSI_CTL_VDW_D32: 832 *dwidth = VME_D32; 833 break; 834 case CA91CX42_LSI_CTL_VDW_D64: 835 *dwidth = VME_D64; 836 break; 837 } 838 839 return 0; 840} 841 842static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled, 843 unsigned long long *vme_base, unsigned long long *size, u32 *aspace, 844 u32 *cycle, u32 *dwidth) 845{ 846 int retval; 847 848 spin_lock(&image->lock); 849 850 retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace, 851 cycle, dwidth); 852 853 spin_unlock(&image->lock); 854 855 return retval; 856} 857 858static ssize_t ca91cx42_master_read(struct vme_master_resource *image, 859 void *buf, size_t count, loff_t offset) 860{ 861 ssize_t retval; 862 void __iomem *addr = image->kern_base + offset; 863 unsigned int done = 0; 864 unsigned int count32; 865 866 if (count == 0) 867 return 0; 868 869 spin_lock(&image->lock); 870 871 /* The following code handles VME address alignment. We cannot use 872 * memcpy_xxx here because it may cut data transfers in to 8-bit 873 * cycles when D16 or D32 cycles are required on the VME bus. 
874 * On the other hand, the bridge itself assures that the maximum data 875 * cycle configured for the transfer is used and splits it 876 * automatically for non-aligned addresses, so we don't want the 877 * overhead of needlessly forcing small transfers for the entire cycle. 878 */ 879 if ((uintptr_t)addr & 0x1) { 880 *(u8 *)buf = ioread8(addr); 881 done += 1; 882 if (done == count) 883 goto out; 884 } 885 if ((uintptr_t)(addr + done) & 0x2) { 886 if ((count - done) < 2) { 887 *(u8 *)(buf + done) = ioread8(addr + done); 888 done += 1; 889 goto out; 890 } else { 891 *(u16 *)(buf + done) = ioread16(addr + done); 892 done += 2; 893 } 894 } 895 896 count32 = (count - done) & ~0x3; 897 while (done < count32) { 898 *(u32 *)(buf + done) = ioread32(addr + done); 899 done += 4; 900 } 901 902 if ((count - done) & 0x2) { 903 *(u16 *)(buf + done) = ioread16(addr + done); 904 done += 2; 905 } 906 if ((count - done) & 0x1) { 907 *(u8 *)(buf + done) = ioread8(addr + done); 908 done += 1; 909 } 910out: 911 retval = count; 912 spin_unlock(&image->lock); 913 914 return retval; 915} 916 917static ssize_t ca91cx42_master_write(struct vme_master_resource *image, 918 void *buf, size_t count, loff_t offset) 919{ 920 ssize_t retval; 921 void __iomem *addr = image->kern_base + offset; 922 unsigned int done = 0; 923 unsigned int count32; 924 925 if (count == 0) 926 return 0; 927 928 spin_lock(&image->lock); 929 930 /* Here we apply for the same strategy we do in master_read 931 * function in order to assure the correct cycles. 
932 */ 933 if ((uintptr_t)addr & 0x1) { 934 iowrite8(*(u8 *)buf, addr); 935 done += 1; 936 if (done == count) 937 goto out; 938 } 939 if ((uintptr_t)(addr + done) & 0x2) { 940 if ((count - done) < 2) { 941 iowrite8(*(u8 *)(buf + done), addr + done); 942 done += 1; 943 goto out; 944 } else { 945 iowrite16(*(u16 *)(buf + done), addr + done); 946 done += 2; 947 } 948 } 949 950 count32 = (count - done) & ~0x3; 951 while (done < count32) { 952 iowrite32(*(u32 *)(buf + done), addr + done); 953 done += 4; 954 } 955 956 if ((count - done) & 0x2) { 957 iowrite16(*(u16 *)(buf + done), addr + done); 958 done += 2; 959 } 960 if ((count - done) & 0x1) { 961 iowrite8(*(u8 *)(buf + done), addr + done); 962 done += 1; 963 } 964out: 965 retval = count; 966 967 spin_unlock(&image->lock); 968 969 return retval; 970} 971 972static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image, 973 unsigned int mask, unsigned int compare, unsigned int swap, 974 loff_t offset) 975{ 976 u32 result; 977 uintptr_t pci_addr; 978 int i; 979 struct ca91cx42_driver *bridge; 980 struct device *dev; 981 982 bridge = image->parent->driver_priv; 983 dev = image->parent->parent; 984 985 /* Find the PCI address that maps to the desired VME address */ 986 i = image->number; 987 988 /* Locking as we can only do one of these at a time */ 989 mutex_lock(&bridge->vme_rmw); 990 991 /* Lock image */ 992 spin_lock(&image->lock); 993 994 pci_addr = (uintptr_t)image->kern_base + offset; 995 996 /* Address must be 4-byte aligned */ 997 if (pci_addr & 0x3) { 998 dev_err(dev, "RMW Address not 4-byte aligned\n"); 999 result = -EINVAL; 1000 goto out; 1001 } 1002 1003 /* Ensure RMW Disabled whilst configuring */ 1004 iowrite32(0, bridge->base + SCYC_CTL); 1005 1006 /* Configure registers */ 1007 iowrite32(mask, bridge->base + SCYC_EN); 1008 iowrite32(compare, bridge->base + SCYC_CMP); 1009 iowrite32(swap, bridge->base + SCYC_SWP); 1010 iowrite32(pci_addr, bridge->base + SCYC_ADDR); 1011 1012 /* Enable RMW */ 
1013 iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL); 1014 1015 /* Kick process off with a read to the required address. */ 1016 result = ioread32(image->kern_base + offset); 1017 1018 /* Disable RMW */ 1019 iowrite32(0, bridge->base + SCYC_CTL); 1020 1021out: 1022 spin_unlock(&image->lock); 1023 1024 mutex_unlock(&bridge->vme_rmw); 1025 1026 return result; 1027} 1028 1029static int ca91cx42_dma_list_add(struct vme_dma_list *list, 1030 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count) 1031{ 1032 struct ca91cx42_dma_entry *entry, *prev; 1033 struct vme_dma_pci *pci_attr; 1034 struct vme_dma_vme *vme_attr; 1035 dma_addr_t desc_ptr; 1036 int retval = 0; 1037 struct device *dev; 1038 1039 dev = list->parent->parent->parent; 1040 1041 /* XXX descriptor must be aligned on 64-bit boundaries */ 1042 entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL); 1043 if (entry == NULL) { 1044 dev_err(dev, "Failed to allocate memory for dma resource " 1045 "structure\n"); 1046 retval = -ENOMEM; 1047 goto err_mem; 1048 } 1049 1050 /* Test descriptor alignment */ 1051 if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) { 1052 dev_err(dev, "Descriptor not aligned to 16 byte boundary as " 1053 "required: %p\n", &entry->descriptor); 1054 retval = -EINVAL; 1055 goto err_align; 1056 } 1057 1058 memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor)); 1059 1060 if (dest->type == VME_DMA_VME) { 1061 entry->descriptor.dctl |= CA91CX42_DCTL_L2V; 1062 vme_attr = dest->private; 1063 pci_attr = src->private; 1064 } else { 1065 vme_attr = src->private; 1066 pci_attr = dest->private; 1067 } 1068 1069 /* Check we can do fulfill required attributes */ 1070 if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 | 1071 VME_USER2)) != 0) { 1072 1073 dev_err(dev, "Unsupported cycle type\n"); 1074 retval = -EINVAL; 1075 goto err_aspace; 1076 } 1077 1078 if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER | 1079 VME_PROG | 
VME_DATA)) != 0) { 1080 1081 dev_err(dev, "Unsupported cycle type\n"); 1082 retval = -EINVAL; 1083 goto err_cycle; 1084 } 1085 1086 /* Check to see if we can fulfill source and destination */ 1087 if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) || 1088 ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) { 1089 1090 dev_err(dev, "Cannot perform transfer with this " 1091 "source-destination combination\n"); 1092 retval = -EINVAL; 1093 goto err_direct; 1094 } 1095 1096 /* Setup cycle types */ 1097 if (vme_attr->cycle & VME_BLT) 1098 entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT; 1099 1100 /* Setup data width */ 1101 switch (vme_attr->dwidth) { 1102 case VME_D8: 1103 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8; 1104 break; 1105 case VME_D16: 1106 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16; 1107 break; 1108 case VME_D32: 1109 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32; 1110 break; 1111 case VME_D64: 1112 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64; 1113 break; 1114 default: 1115 dev_err(dev, "Invalid data width\n"); 1116 return -EINVAL; 1117 } 1118 1119 /* Setup address space */ 1120 switch (vme_attr->aspace) { 1121 case VME_A16: 1122 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16; 1123 break; 1124 case VME_A24: 1125 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24; 1126 break; 1127 case VME_A32: 1128 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32; 1129 break; 1130 case VME_USER1: 1131 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1; 1132 break; 1133 case VME_USER2: 1134 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2; 1135 break; 1136 default: 1137 dev_err(dev, "Invalid address space\n"); 1138 return -EINVAL; 1139 break; 1140 } 1141 1142 if (vme_attr->cycle & VME_SUPER) 1143 entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR; 1144 if (vme_attr->cycle & VME_PROG) 1145 entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM; 1146 1147 entry->descriptor.dtbc = count; 1148 entry->descriptor.dla = pci_attr->address; 1149 
entry->descriptor.dva = vme_attr->address; 1150 entry->descriptor.dcpp = CA91CX42_DCPP_NULL; 1151 1152 /* Add to list */ 1153 list_add_tail(&entry->list, &list->entries); 1154 1155 /* Fill out previous descriptors "Next Address" */ 1156 if (entry->list.prev != &list->entries) { 1157 prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry, 1158 list); 1159 /* We need the bus address for the pointer */ 1160 desc_ptr = virt_to_bus(&entry->descriptor); 1161 prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M; 1162 } 1163 1164 return 0; 1165 1166err_cycle: 1167err_aspace: 1168err_direct: 1169err_align: 1170 kfree(entry); 1171err_mem: 1172 return retval; 1173} 1174 1175static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge) 1176{ 1177 u32 tmp; 1178 struct ca91cx42_driver *bridge; 1179 1180 bridge = ca91cx42_bridge->driver_priv; 1181 1182 tmp = ioread32(bridge->base + DGCS); 1183 1184 if (tmp & CA91CX42_DGCS_ACT) 1185 return 0; 1186 else 1187 return 1; 1188} 1189 1190static int ca91cx42_dma_list_exec(struct vme_dma_list *list) 1191{ 1192 struct vme_dma_resource *ctrlr; 1193 struct ca91cx42_dma_entry *entry; 1194 int retval; 1195 dma_addr_t bus_addr; 1196 u32 val; 1197 struct device *dev; 1198 struct ca91cx42_driver *bridge; 1199 1200 ctrlr = list->parent; 1201 1202 bridge = ctrlr->parent->driver_priv; 1203 dev = ctrlr->parent->parent; 1204 1205 mutex_lock(&ctrlr->mtx); 1206 1207 if (!(list_empty(&ctrlr->running))) { 1208 /* 1209 * XXX We have an active DMA transfer and currently haven't 1210 * sorted out the mechanism for "pending" DMA transfers. 1211 * Return busy. 
1212 */ 1213 /* Need to add to pending here */ 1214 mutex_unlock(&ctrlr->mtx); 1215 return -EBUSY; 1216 } else { 1217 list_add(&list->list, &ctrlr->running); 1218 } 1219 1220 /* Get first bus address and write into registers */ 1221 entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry, 1222 list); 1223 1224 bus_addr = virt_to_bus(&entry->descriptor); 1225 1226 mutex_unlock(&ctrlr->mtx); 1227 1228 iowrite32(0, bridge->base + DTBC); 1229 iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP); 1230 1231 /* Start the operation */ 1232 val = ioread32(bridge->base + DGCS); 1233 1234 /* XXX Could set VMEbus On and Off Counters here */ 1235 val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M); 1236 1237 val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT | 1238 CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR | 1239 CA91CX42_DGCS_PERR); 1240 1241 iowrite32(val, bridge->base + DGCS); 1242 1243 val |= CA91CX42_DGCS_GO; 1244 1245 iowrite32(val, bridge->base + DGCS); 1246 1247 retval = wait_event_interruptible(bridge->dma_queue, 1248 ca91cx42_dma_busy(ctrlr->parent)); 1249 1250 if (retval) { 1251 val = ioread32(bridge->base + DGCS); 1252 iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS); 1253 /* Wait for the operation to abort */ 1254 wait_event(bridge->dma_queue, 1255 ca91cx42_dma_busy(ctrlr->parent)); 1256 retval = -EINTR; 1257 goto exit; 1258 } 1259 1260 /* 1261 * Read status register, this register is valid until we kick off a 1262 * new transfer. 1263 */ 1264 val = ioread32(bridge->base + DGCS); 1265 1266 if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR | 1267 CA91CX42_DGCS_PERR)) { 1268 1269 dev_err(dev, "ca91c042: DMA Error. 
DGCS=%08X\n", val); 1270 val = ioread32(bridge->base + DCTL); 1271 retval = -EIO; 1272 } 1273 1274exit: 1275 /* Remove list from running list */ 1276 mutex_lock(&ctrlr->mtx); 1277 list_del(&list->list); 1278 mutex_unlock(&ctrlr->mtx); 1279 1280 return retval; 1281 1282} 1283 1284static int ca91cx42_dma_list_empty(struct vme_dma_list *list) 1285{ 1286 struct list_head *pos, *temp; 1287 struct ca91cx42_dma_entry *entry; 1288 1289 /* detach and free each entry */ 1290 list_for_each_safe(pos, temp, &list->entries) { 1291 list_del(pos); 1292 entry = list_entry(pos, struct ca91cx42_dma_entry, list); 1293 kfree(entry); 1294 } 1295 1296 return 0; 1297} 1298 1299/* 1300 * All 4 location monitors reside at the same base - this is therefore a 1301 * system wide configuration. 1302 * 1303 * This does not enable the LM monitor - that should be done when the first 1304 * callback is attached and disabled when the last callback is removed. 1305 */ 1306static int ca91cx42_lm_set(struct vme_lm_resource *lm, 1307 unsigned long long lm_base, u32 aspace, u32 cycle) 1308{ 1309 u32 temp_base, lm_ctl = 0; 1310 int i; 1311 struct ca91cx42_driver *bridge; 1312 struct device *dev; 1313 1314 bridge = lm->parent->driver_priv; 1315 dev = lm->parent->parent; 1316 1317 /* Check the alignment of the location monitor */ 1318 temp_base = (u32)lm_base; 1319 if (temp_base & 0xffff) { 1320 dev_err(dev, "Location monitor must be aligned to 64KB " 1321 "boundary"); 1322 return -EINVAL; 1323 } 1324 1325 mutex_lock(&lm->mtx); 1326 1327 /* If we already have a callback attached, we can't move it! 
*/ 1328 for (i = 0; i < lm->monitors; i++) { 1329 if (bridge->lm_callback[i] != NULL) { 1330 mutex_unlock(&lm->mtx); 1331 dev_err(dev, "Location monitor callback attached, " 1332 "can't reset\n"); 1333 return -EBUSY; 1334 } 1335 } 1336 1337 switch (aspace) { 1338 case VME_A16: 1339 lm_ctl |= CA91CX42_LM_CTL_AS_A16; 1340 break; 1341 case VME_A24: 1342 lm_ctl |= CA91CX42_LM_CTL_AS_A24; 1343 break; 1344 case VME_A32: 1345 lm_ctl |= CA91CX42_LM_CTL_AS_A32; 1346 break; 1347 default: 1348 mutex_unlock(&lm->mtx); 1349 dev_err(dev, "Invalid address space\n"); 1350 return -EINVAL; 1351 break; 1352 } 1353 1354 if (cycle & VME_SUPER) 1355 lm_ctl |= CA91CX42_LM_CTL_SUPR; 1356 if (cycle & VME_USER) 1357 lm_ctl |= CA91CX42_LM_CTL_NPRIV; 1358 if (cycle & VME_PROG) 1359 lm_ctl |= CA91CX42_LM_CTL_PGM; 1360 if (cycle & VME_DATA) 1361 lm_ctl |= CA91CX42_LM_CTL_DATA; 1362 1363 iowrite32(lm_base, bridge->base + LM_BS); 1364 iowrite32(lm_ctl, bridge->base + LM_CTL); 1365 1366 mutex_unlock(&lm->mtx); 1367 1368 return 0; 1369} 1370 1371/* Get configuration of the callback monitor and return whether it is enabled 1372 * or disabled. 
1373 */ 1374static int ca91cx42_lm_get(struct vme_lm_resource *lm, 1375 unsigned long long *lm_base, u32 *aspace, u32 *cycle) 1376{ 1377 u32 lm_ctl, enabled = 0; 1378 struct ca91cx42_driver *bridge; 1379 1380 bridge = lm->parent->driver_priv; 1381 1382 mutex_lock(&lm->mtx); 1383 1384 *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS); 1385 lm_ctl = ioread32(bridge->base + LM_CTL); 1386 1387 if (lm_ctl & CA91CX42_LM_CTL_EN) 1388 enabled = 1; 1389 1390 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16) 1391 *aspace = VME_A16; 1392 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24) 1393 *aspace = VME_A24; 1394 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32) 1395 *aspace = VME_A32; 1396 1397 *cycle = 0; 1398 if (lm_ctl & CA91CX42_LM_CTL_SUPR) 1399 *cycle |= VME_SUPER; 1400 if (lm_ctl & CA91CX42_LM_CTL_NPRIV) 1401 *cycle |= VME_USER; 1402 if (lm_ctl & CA91CX42_LM_CTL_PGM) 1403 *cycle |= VME_PROG; 1404 if (lm_ctl & CA91CX42_LM_CTL_DATA) 1405 *cycle |= VME_DATA; 1406 1407 mutex_unlock(&lm->mtx); 1408 1409 return enabled; 1410} 1411 1412/* 1413 * Attach a callback to a specific location monitor. 1414 * 1415 * Callback will be passed the monitor triggered. 
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* lm->mtx serialises against lm_set/lm_get/lm_detach; "monitor" is
	 * assumed to be a valid index (0..lm->monitors-1) — the callers in
	 * the VME core presumably range-check it; TODO confirm. */
	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that global Location Monitor Enable set */
	/* Note the order: the per-monitor interrupt source is unmasked
	 * before the monitor hardware is globally enabled, so no trigger
	 * can be lost between the two writes. */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
1465 */ 1466static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor) 1467{ 1468 u32 tmp; 1469 struct ca91cx42_driver *bridge; 1470 1471 bridge = lm->parent->driver_priv; 1472 1473 mutex_lock(&lm->mtx); 1474 1475 /* Disable Location Monitor and ensure previous interrupts are clear */ 1476 tmp = ioread32(bridge->base + LINT_EN); 1477 tmp &= ~CA91CX42_LINT_LM[monitor]; 1478 iowrite32(tmp, bridge->base + LINT_EN); 1479 1480 iowrite32(CA91CX42_LINT_LM[monitor], 1481 bridge->base + LINT_STAT); 1482 1483 /* Detach callback */ 1484 bridge->lm_callback[monitor] = NULL; 1485 1486 /* If all location monitors disabled, disable global Location Monitor */ 1487 if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 | 1488 CA91CX42_LINT_LM3)) == 0) { 1489 tmp = ioread32(bridge->base + LM_CTL); 1490 tmp &= ~CA91CX42_LM_CTL_EN; 1491 iowrite32(tmp, bridge->base + LM_CTL); 1492 } 1493 1494 mutex_unlock(&lm->mtx); 1495 1496 return 0; 1497} 1498 1499static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge) 1500{ 1501 u32 slot = 0; 1502 struct ca91cx42_driver *bridge; 1503 1504 bridge = ca91cx42_bridge->driver_priv; 1505 1506 if (!geoid) { 1507 slot = ioread32(bridge->base + VCSR_BS); 1508 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27); 1509 } else 1510 slot = geoid; 1511 1512 return (int)slot; 1513 1514} 1515 1516static void *ca91cx42_alloc_consistent(struct device *parent, size_t size, 1517 dma_addr_t *dma) 1518{ 1519 struct pci_dev *pdev; 1520 1521 /* Find pci_dev container of dev */ 1522 pdev = container_of(parent, struct pci_dev, dev); 1523 1524 return pci_alloc_consistent(pdev, size, dma); 1525} 1526 1527static void ca91cx42_free_consistent(struct device *parent, size_t size, 1528 void *vaddr, dma_addr_t dma) 1529{ 1530 struct pci_dev *pdev; 1531 1532 /* Find pci_dev container of dev */ 1533 pdev = container_of(parent, struct pci_dev, dev); 1534 1535 pci_free_consistent(pdev, size, vaddr, dma); 1536} 1537 1538/* 1539 * Configure CR/CSR space 
1540 * 1541 * Access to the CR/CSR can be configured at power-up. The location of the 1542 * CR/CSR registers in the CR/CSR address space is determined by the boards 1543 * Auto-ID or Geographic address. This function ensures that the window is 1544 * enabled at an offset consistent with the boards geopgraphic address. 1545 */ 1546static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge, 1547 struct pci_dev *pdev) 1548{ 1549 unsigned int crcsr_addr; 1550 int tmp, slot; 1551 struct ca91cx42_driver *bridge; 1552 1553 bridge = ca91cx42_bridge->driver_priv; 1554 1555 slot = ca91cx42_slot_get(ca91cx42_bridge); 1556 1557 /* Write CSR Base Address if slot ID is supplied as a module param */ 1558 if (geoid) 1559 iowrite32(geoid << 27, bridge->base + VCSR_BS); 1560 1561 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot); 1562 if (slot == 0) { 1563 dev_err(&pdev->dev, "Slot number is unset, not configuring " 1564 "CR/CSR space\n"); 1565 return -EINVAL; 1566 } 1567 1568 /* Allocate mem for CR/CSR image */ 1569 bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE, 1570 &bridge->crcsr_bus); 1571 if (bridge->crcsr_kernel == NULL) { 1572 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR " 1573 "image\n"); 1574 return -ENOMEM; 1575 } 1576 1577 crcsr_addr = slot * (512 * 1024); 1578 iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO); 1579 1580 tmp = ioread32(bridge->base + VCSR_CTL); 1581 tmp |= CA91CX42_VCSR_CTL_EN; 1582 iowrite32(tmp, bridge->base + VCSR_CTL); 1583 1584 return 0; 1585} 1586 1587static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge, 1588 struct pci_dev *pdev) 1589{ 1590 u32 tmp; 1591 struct ca91cx42_driver *bridge; 1592 1593 bridge = ca91cx42_bridge->driver_priv; 1594 1595 /* Turn off CR/CSR space */ 1596 tmp = ioread32(bridge->base + VCSR_CTL); 1597 tmp &= ~CA91CX42_VCSR_CTL_EN; 1598 iowrite32(tmp, bridge->base + VCSR_CTL); 1599 1600 /* Free image */ 1601 iowrite32(0, bridge->base + VCSR_TO); 1602 1603 
pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel, 1604 bridge->crcsr_bus); 1605} 1606 1607static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1608{ 1609 int retval, i; 1610 u32 data; 1611 struct list_head *pos = NULL, *n; 1612 struct vme_bridge *ca91cx42_bridge; 1613 struct ca91cx42_driver *ca91cx42_device; 1614 struct vme_master_resource *master_image; 1615 struct vme_slave_resource *slave_image; 1616 struct vme_dma_resource *dma_ctrlr; 1617 struct vme_lm_resource *lm; 1618 1619 /* We want to support more than one of each bridge so we need to 1620 * dynamically allocate the bridge structure 1621 */ 1622 ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL); 1623 1624 if (ca91cx42_bridge == NULL) { 1625 dev_err(&pdev->dev, "Failed to allocate memory for device " 1626 "structure\n"); 1627 retval = -ENOMEM; 1628 goto err_struct; 1629 } 1630 1631 ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL); 1632 1633 if (ca91cx42_device == NULL) { 1634 dev_err(&pdev->dev, "Failed to allocate memory for device " 1635 "structure\n"); 1636 retval = -ENOMEM; 1637 goto err_driver; 1638 } 1639 1640 ca91cx42_bridge->driver_priv = ca91cx42_device; 1641 1642 /* Enable the device */ 1643 retval = pci_enable_device(pdev); 1644 if (retval) { 1645 dev_err(&pdev->dev, "Unable to enable device\n"); 1646 goto err_enable; 1647 } 1648 1649 /* Map Registers */ 1650 retval = pci_request_regions(pdev, driver_name); 1651 if (retval) { 1652 dev_err(&pdev->dev, "Unable to reserve resources\n"); 1653 goto err_resource; 1654 } 1655 1656 /* map registers in BAR 0 */ 1657 ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0), 1658 4096); 1659 if (!ca91cx42_device->base) { 1660 dev_err(&pdev->dev, "Unable to remap CRG region\n"); 1661 retval = -EIO; 1662 goto err_remap; 1663 } 1664 1665 /* Check to see if the mapping worked out */ 1666 data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF; 1667 if 
(data != PCI_VENDOR_ID_TUNDRA) { 1668 dev_err(&pdev->dev, "PCI_ID check failed\n"); 1669 retval = -EIO; 1670 goto err_test; 1671 } 1672 1673 /* Initialize wait queues & mutual exclusion flags */ 1674 init_waitqueue_head(&ca91cx42_device->dma_queue); 1675 init_waitqueue_head(&ca91cx42_device->iack_queue); 1676 mutex_init(&ca91cx42_device->vme_int); 1677 mutex_init(&ca91cx42_device->vme_rmw); 1678 1679 ca91cx42_bridge->parent = &pdev->dev; 1680 strcpy(ca91cx42_bridge->name, driver_name); 1681 1682 /* Setup IRQ */ 1683 retval = ca91cx42_irq_init(ca91cx42_bridge); 1684 if (retval != 0) { 1685 dev_err(&pdev->dev, "Chip Initialization failed.\n"); 1686 goto err_irq; 1687 } 1688 1689 /* Add master windows to list */ 1690 INIT_LIST_HEAD(&ca91cx42_bridge->master_resources); 1691 for (i = 0; i < CA91C142_MAX_MASTER; i++) { 1692 master_image = kmalloc(sizeof(struct vme_master_resource), 1693 GFP_KERNEL); 1694 if (master_image == NULL) { 1695 dev_err(&pdev->dev, "Failed to allocate memory for " 1696 "master resource structure\n"); 1697 retval = -ENOMEM; 1698 goto err_master; 1699 } 1700 master_image->parent = ca91cx42_bridge; 1701 spin_lock_init(&master_image->lock); 1702 master_image->locked = 0; 1703 master_image->number = i; 1704 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | 1705 VME_CRCSR | VME_USER1 | VME_USER2; 1706 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | 1707 VME_SUPER | VME_USER | VME_PROG | VME_DATA; 1708 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64; 1709 memset(&master_image->bus_resource, 0, 1710 sizeof(struct resource)); 1711 master_image->kern_base = NULL; 1712 list_add_tail(&master_image->list, 1713 &ca91cx42_bridge->master_resources); 1714 } 1715 1716 /* Add slave windows to list */ 1717 INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources); 1718 for (i = 0; i < CA91C142_MAX_SLAVE; i++) { 1719 slave_image = kmalloc(sizeof(struct vme_slave_resource), 1720 GFP_KERNEL); 1721 if (slave_image == NULL) { 1722 
dev_err(&pdev->dev, "Failed to allocate memory for " 1723 "slave resource structure\n"); 1724 retval = -ENOMEM; 1725 goto err_slave; 1726 } 1727 slave_image->parent = ca91cx42_bridge; 1728 mutex_init(&slave_image->mtx); 1729 slave_image->locked = 0; 1730 slave_image->number = i; 1731 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 | 1732 VME_USER2; 1733 1734 /* Only windows 0 and 4 support A16 */ 1735 if (i == 0 || i == 4) 1736 slave_image->address_attr |= VME_A16; 1737 1738 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | 1739 VME_SUPER | VME_USER | VME_PROG | VME_DATA; 1740 list_add_tail(&slave_image->list, 1741 &ca91cx42_bridge->slave_resources); 1742 } 1743 1744 /* Add dma engines to list */ 1745 INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources); 1746 for (i = 0; i < CA91C142_MAX_DMA; i++) { 1747 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), 1748 GFP_KERNEL); 1749 if (dma_ctrlr == NULL) { 1750 dev_err(&pdev->dev, "Failed to allocate memory for " 1751 "dma resource structure\n"); 1752 retval = -ENOMEM; 1753 goto err_dma; 1754 } 1755 dma_ctrlr->parent = ca91cx42_bridge; 1756 mutex_init(&dma_ctrlr->mtx); 1757 dma_ctrlr->locked = 0; 1758 dma_ctrlr->number = i; 1759 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM | 1760 VME_DMA_MEM_TO_VME; 1761 INIT_LIST_HEAD(&dma_ctrlr->pending); 1762 INIT_LIST_HEAD(&dma_ctrlr->running); 1763 list_add_tail(&dma_ctrlr->list, 1764 &ca91cx42_bridge->dma_resources); 1765 } 1766 1767 /* Add location monitor to list */ 1768 INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources); 1769 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); 1770 if (lm == NULL) { 1771 dev_err(&pdev->dev, "Failed to allocate memory for " 1772 "location monitor resource structure\n"); 1773 retval = -ENOMEM; 1774 goto err_lm; 1775 } 1776 lm->parent = ca91cx42_bridge; 1777 mutex_init(&lm->mtx); 1778 lm->locked = 0; 1779 lm->number = 1; 1780 lm->monitors = 4; 1781 list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources); 1782 1783 
ca91cx42_bridge->slave_get = ca91cx42_slave_get; 1784 ca91cx42_bridge->slave_set = ca91cx42_slave_set; 1785 ca91cx42_bridge->master_get = ca91cx42_master_get; 1786 ca91cx42_bridge->master_set = ca91cx42_master_set; 1787 ca91cx42_bridge->master_read = ca91cx42_master_read; 1788 ca91cx42_bridge->master_write = ca91cx42_master_write; 1789 ca91cx42_bridge->master_rmw = ca91cx42_master_rmw; 1790 ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add; 1791 ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec; 1792 ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty; 1793 ca91cx42_bridge->irq_set = ca91cx42_irq_set; 1794 ca91cx42_bridge->irq_generate = ca91cx42_irq_generate; 1795 ca91cx42_bridge->lm_set = ca91cx42_lm_set; 1796 ca91cx42_bridge->lm_get = ca91cx42_lm_get; 1797 ca91cx42_bridge->lm_attach = ca91cx42_lm_attach; 1798 ca91cx42_bridge->lm_detach = ca91cx42_lm_detach; 1799 ca91cx42_bridge->slot_get = ca91cx42_slot_get; 1800 ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent; 1801 ca91cx42_bridge->free_consistent = ca91cx42_free_consistent; 1802 1803 data = ioread32(ca91cx42_device->base + MISC_CTL); 1804 dev_info(&pdev->dev, "Board is%s the VME system controller\n", 1805 (data & CA91CX42_MISC_CTL_SYSCON) ? 
"" : " not"); 1806 dev_info(&pdev->dev, "Slot ID is %d\n", 1807 ca91cx42_slot_get(ca91cx42_bridge)); 1808 1809 if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) 1810 dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); 1811 1812 /* Need to save ca91cx42_bridge pointer locally in link list for use in 1813 * ca91cx42_remove() 1814 */ 1815 retval = vme_register_bridge(ca91cx42_bridge); 1816 if (retval != 0) { 1817 dev_err(&pdev->dev, "Chip Registration failed.\n"); 1818 goto err_reg; 1819 } 1820 1821 pci_set_drvdata(pdev, ca91cx42_bridge); 1822 1823 return 0; 1824 1825err_reg: 1826 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev); 1827err_lm: 1828 /* resources are stored in link list */ 1829 list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) { 1830 lm = list_entry(pos, struct vme_lm_resource, list); 1831 list_del(pos); 1832 kfree(lm); 1833 } 1834err_dma: 1835 /* resources are stored in link list */ 1836 list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) { 1837 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); 1838 list_del(pos); 1839 kfree(dma_ctrlr); 1840 } 1841err_slave: 1842 /* resources are stored in link list */ 1843 list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) { 1844 slave_image = list_entry(pos, struct vme_slave_resource, list); 1845 list_del(pos); 1846 kfree(slave_image); 1847 } 1848err_master: 1849 /* resources are stored in link list */ 1850 list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) { 1851 master_image = list_entry(pos, struct vme_master_resource, 1852 list); 1853 list_del(pos); 1854 kfree(master_image); 1855 } 1856 1857 ca91cx42_irq_exit(ca91cx42_device, pdev); 1858err_irq: 1859err_test: 1860 iounmap(ca91cx42_device->base); 1861err_remap: 1862 pci_release_regions(pdev); 1863err_resource: 1864 pci_disable_device(pdev); 1865err_enable: 1866 kfree(ca91cx42_device); 1867err_driver: 1868 kfree(ca91cx42_bridge); 1869err_struct: 1870 return retval; 1871 1872} 1873 1874static void ca91cx42_remove(struct 
pci_dev *pdev) 1875{ 1876 struct list_head *pos = NULL, *n; 1877 struct vme_master_resource *master_image; 1878 struct vme_slave_resource *slave_image; 1879 struct vme_dma_resource *dma_ctrlr; 1880 struct vme_lm_resource *lm; 1881 struct ca91cx42_driver *bridge; 1882 struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev); 1883 1884 bridge = ca91cx42_bridge->driver_priv; 1885 1886 1887 /* Turn off Ints */ 1888 iowrite32(0, bridge->base + LINT_EN); 1889 1890 /* Turn off the windows */ 1891 iowrite32(0x00800000, bridge->base + LSI0_CTL); 1892 iowrite32(0x00800000, bridge->base + LSI1_CTL); 1893 iowrite32(0x00800000, bridge->base + LSI2_CTL); 1894 iowrite32(0x00800000, bridge->base + LSI3_CTL); 1895 iowrite32(0x00800000, bridge->base + LSI4_CTL); 1896 iowrite32(0x00800000, bridge->base + LSI5_CTL); 1897 iowrite32(0x00800000, bridge->base + LSI6_CTL); 1898 iowrite32(0x00800000, bridge->base + LSI7_CTL); 1899 iowrite32(0x00F00000, bridge->base + VSI0_CTL); 1900 iowrite32(0x00F00000, bridge->base + VSI1_CTL); 1901 iowrite32(0x00F00000, bridge->base + VSI2_CTL); 1902 iowrite32(0x00F00000, bridge->base + VSI3_CTL); 1903 iowrite32(0x00F00000, bridge->base + VSI4_CTL); 1904 iowrite32(0x00F00000, bridge->base + VSI5_CTL); 1905 iowrite32(0x00F00000, bridge->base + VSI6_CTL); 1906 iowrite32(0x00F00000, bridge->base + VSI7_CTL); 1907 1908 vme_unregister_bridge(ca91cx42_bridge); 1909 1910 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev); 1911 1912 /* resources are stored in link list */ 1913 list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) { 1914 lm = list_entry(pos, struct vme_lm_resource, list); 1915 list_del(pos); 1916 kfree(lm); 1917 } 1918 1919 /* resources are stored in link list */ 1920 list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) { 1921 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); 1922 list_del(pos); 1923 kfree(dma_ctrlr); 1924 } 1925 1926 /* resources are stored in link list */ 1927 list_for_each_safe(pos, n, 
&ca91cx42_bridge->slave_resources) { 1928 slave_image = list_entry(pos, struct vme_slave_resource, list); 1929 list_del(pos); 1930 kfree(slave_image); 1931 } 1932 1933 /* resources are stored in link list */ 1934 list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) { 1935 master_image = list_entry(pos, struct vme_master_resource, 1936 list); 1937 list_del(pos); 1938 kfree(master_image); 1939 } 1940 1941 ca91cx42_irq_exit(bridge, pdev); 1942 1943 iounmap(bridge->base); 1944 1945 pci_release_regions(pdev); 1946 1947 pci_disable_device(pdev); 1948 1949 kfree(ca91cx42_bridge); 1950} 1951 1952module_pci_driver(ca91cx42_driver); 1953 1954MODULE_PARM_DESC(geoid, "Override geographical addressing"); 1955module_param(geoid, int, 0); 1956 1957MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge"); 1958MODULE_LICENSE("GPL");