Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git

+8660 -2003
+19
arch/ia64/Kconfig
··· 217 217 If you are compiling a kernel that will run under SGI's IA-64 218 218 simulator (Medusa) then say Y, otherwise say N. 219 219 220 + config IA64_SGI_SN_XP 221 + tristate "Support communication between SGI SSIs" 222 + depends on MSPEC 223 + help 224 + An SGI machine can be divided into multiple Single System 225 + Images which act independently of each other and have 226 + hardware based memory protection from the others. Enabling 227 + this feature will allow for direct communication between SSIs 228 + based on a network adapter and DMA messaging. 229 + 220 230 config FORCE_MAX_ZONEORDER 221 231 int 222 232 default "18" ··· 270 260 Say Y here to experiment with turning CPUs off and on. CPUs 271 261 can be controlled through /sys/devices/system/cpu/cpu#. 272 262 Say N if you want to disable CPU hotplug. 263 + 264 + config SCHED_SMT 265 + bool "SMT scheduler support" 266 + depends on SMP 267 + default off 268 + help 269 + Improves the CPU scheduler's decision making when dealing with 270 + Intel IA64 chips with MultiThreading at a cost of slightly increased 271 + overhead in some places. If unsure say N here. 273 272 274 273 config PREEMPT 275 274 bool "Preemptible Kernel"
+57 -39
arch/ia64/configs/tiger_defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.11-rc2 4 - # Sat Jan 22 11:17:02 2005 3 + # Linux kernel version: 2.6.12-rc3 4 + # Tue May 3 15:55:04 2005 5 5 # 6 6 7 7 # ··· 10 10 CONFIG_EXPERIMENTAL=y 11 11 CONFIG_CLEAN_COMPILE=y 12 12 CONFIG_LOCK_KERNEL=y 13 + CONFIG_INIT_ENV_ARG_LIMIT=32 13 14 14 15 # 15 16 # General setup ··· 22 21 # CONFIG_BSD_PROCESS_ACCT is not set 23 22 CONFIG_SYSCTL=y 24 23 # CONFIG_AUDIT is not set 25 - CONFIG_LOG_BUF_SHIFT=20 26 24 CONFIG_HOTPLUG=y 27 25 CONFIG_KOBJECT_UEVENT=y 28 26 CONFIG_IKCONFIG=y 29 27 CONFIG_IKCONFIG_PROC=y 28 + # CONFIG_CPUSETS is not set 30 29 # CONFIG_EMBEDDED is not set 31 30 CONFIG_KALLSYMS=y 32 31 CONFIG_KALLSYMS_ALL=y 33 32 # CONFIG_KALLSYMS_EXTRA_PASS is not set 33 + CONFIG_PRINTK=y 34 + CONFIG_BUG=y 35 + CONFIG_BASE_FULL=y 34 36 CONFIG_FUTEX=y 35 37 CONFIG_EPOLL=y 36 - # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 37 38 CONFIG_SHMEM=y 38 39 CONFIG_CC_ALIGN_FUNCTIONS=0 39 40 CONFIG_CC_ALIGN_LABELS=0 40 41 CONFIG_CC_ALIGN_LOOPS=0 41 42 CONFIG_CC_ALIGN_JUMPS=0 42 43 # CONFIG_TINY_SHMEM is not set 44 + CONFIG_BASE_SMALL=0 43 45 44 46 # 45 47 # Loadable module support ··· 89 85 CONFIG_SMP=y 90 86 CONFIG_NR_CPUS=4 91 87 CONFIG_HOTPLUG_CPU=y 88 + # CONFIG_SCHED_SMT is not set 92 89 # CONFIG_PREEMPT is not set 93 90 CONFIG_HAVE_DEC_LOCK=y 94 91 CONFIG_IA32_SUPPORT=y ··· 140 135 # CONFIG_PCI_MSI is not set 141 136 CONFIG_PCI_LEGACY_PROC=y 142 137 CONFIG_PCI_NAMES=y 138 + # CONFIG_PCI_DEBUG is not set 143 139 144 140 # 145 141 # PCI Hotplug Support ··· 156 150 # PCCARD (PCMCIA/CardBus) support 157 151 # 158 152 # CONFIG_PCCARD is not set 159 - 160 - # 161 - # PC-card bridges 162 - # 163 153 164 154 # 165 155 # Device Drivers ··· 197 195 CONFIG_BLK_DEV_NBD=m 198 196 # CONFIG_BLK_DEV_SX8 is not set 199 197 # CONFIG_BLK_DEV_UB is not set 200 - CONFIG_BLK_DEV_RAM=m 198 + CONFIG_BLK_DEV_RAM=y 201 199 CONFIG_BLK_DEV_RAM_COUNT=16 202 200 CONFIG_BLK_DEV_RAM_SIZE=4096 201 + 
CONFIG_BLK_DEV_INITRD=y 203 202 CONFIG_INITRAMFS_SOURCE="" 204 203 # CONFIG_CDROM_PKTCDVD is not set 205 204 ··· 316 313 # CONFIG_SCSI_BUSLOGIC is not set 317 314 # CONFIG_SCSI_DMX3191D is not set 318 315 # CONFIG_SCSI_EATA is not set 319 - # CONFIG_SCSI_EATA_PIO is not set 320 316 # CONFIG_SCSI_FUTURE_DOMAIN is not set 321 317 # CONFIG_SCSI_GDTH is not set 322 318 # CONFIG_SCSI_IPS is not set ··· 327 325 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 328 326 # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set 329 327 # CONFIG_SCSI_IPR is not set 330 - # CONFIG_SCSI_QLOGIC_ISP is not set 331 328 CONFIG_SCSI_QLOGIC_FC=y 332 329 # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set 333 330 CONFIG_SCSI_QLOGIC_1280=y ··· 337 336 CONFIG_SCSI_QLA2300=m 338 337 CONFIG_SCSI_QLA2322=m 339 338 # CONFIG_SCSI_QLA6312 is not set 339 + # CONFIG_SCSI_LPFC is not set 340 340 # CONFIG_SCSI_DC395x is not set 341 341 # CONFIG_SCSI_DC390T is not set 342 342 # CONFIG_SCSI_DEBUG is not set ··· 360 358 CONFIG_DM_SNAPSHOT=m 361 359 CONFIG_DM_MIRROR=m 362 360 CONFIG_DM_ZERO=m 361 + # CONFIG_DM_MULTIPATH is not set 363 362 364 363 # 365 364 # Fusion MPT device support ··· 389 386 # 390 387 CONFIG_PACKET=y 391 388 # CONFIG_PACKET_MMAP is not set 392 - CONFIG_NETLINK_DEV=y 393 389 CONFIG_UNIX=y 394 390 # CONFIG_NET_KEY is not set 395 391 CONFIG_INET=y ··· 448 446 # CONFIG_BONDING is not set 449 447 # CONFIG_EQUALIZER is not set 450 448 # CONFIG_TUN is not set 451 - # CONFIG_ETHERTAP is not set 452 449 453 450 # 454 451 # ARCnet devices ··· 485 484 # CONFIG_DGRS is not set 486 485 CONFIG_EEPRO100=m 487 486 CONFIG_E100=m 488 - # CONFIG_E100_NAPI is not set 489 487 # CONFIG_FEALNX is not set 490 488 # CONFIG_NATSEMI is not set 491 489 # CONFIG_NE2K_PCI is not set ··· 566 566 # CONFIG_INPUT_EVBUG is not set 567 567 568 568 # 569 - # Input I/O drivers 570 - # 571 - CONFIG_GAMEPORT=m 572 - CONFIG_SOUND_GAMEPORT=m 573 - # CONFIG_GAMEPORT_NS558 is not set 574 - # CONFIG_GAMEPORT_L4 is not set 575 - # CONFIG_GAMEPORT_EMU10K1 is not 
set 576 - # CONFIG_GAMEPORT_VORTEX is not set 577 - # CONFIG_GAMEPORT_FM801 is not set 578 - # CONFIG_GAMEPORT_CS461X is not set 579 - CONFIG_SERIO=y 580 - CONFIG_SERIO_I8042=y 581 - # CONFIG_SERIO_SERPORT is not set 582 - # CONFIG_SERIO_CT82C710 is not set 583 - # CONFIG_SERIO_PCIPS2 is not set 584 - CONFIG_SERIO_LIBPS2=y 585 - # CONFIG_SERIO_RAW is not set 586 - 587 - # 588 569 # Input Device Drivers 589 570 # 590 571 CONFIG_INPUT_KEYBOARD=y ··· 583 602 # CONFIG_INPUT_MISC is not set 584 603 585 604 # 605 + # Hardware I/O ports 606 + # 607 + CONFIG_SERIO=y 608 + CONFIG_SERIO_I8042=y 609 + # CONFIG_SERIO_SERPORT is not set 610 + # CONFIG_SERIO_PCIPS2 is not set 611 + CONFIG_SERIO_LIBPS2=y 612 + # CONFIG_SERIO_RAW is not set 613 + CONFIG_GAMEPORT=m 614 + # CONFIG_GAMEPORT_NS558 is not set 615 + # CONFIG_GAMEPORT_L4 is not set 616 + # CONFIG_GAMEPORT_EMU10K1 is not set 617 + # CONFIG_GAMEPORT_VORTEX is not set 618 + # CONFIG_GAMEPORT_FM801 is not set 619 + # CONFIG_GAMEPORT_CS461X is not set 620 + CONFIG_SOUND_GAMEPORT=m 621 + 622 + # 586 623 # Character devices 587 624 # 588 625 CONFIG_VT=y ··· 614 615 # CONFIG_SYNCLINK is not set 615 616 # CONFIG_SYNCLINKMP is not set 616 617 # CONFIG_N_HDLC is not set 618 + # CONFIG_SPECIALIX is not set 619 + # CONFIG_SX is not set 617 620 # CONFIG_STALDRV is not set 618 621 619 622 # ··· 636 635 # 637 636 CONFIG_SERIAL_CORE=y 638 637 CONFIG_SERIAL_CORE_CONSOLE=y 638 + # CONFIG_SERIAL_JSM is not set 639 639 CONFIG_UNIX98_PTYS=y 640 640 CONFIG_LEGACY_PTYS=y 641 641 CONFIG_LEGACY_PTY_COUNT=256 ··· 672 670 # CONFIG_HPET_RTC_IRQ is not set 673 671 CONFIG_HPET_MMAP=y 674 672 CONFIG_MAX_RAW_DEVS=256 673 + # CONFIG_HANGCHECK_TIMER is not set 674 + 675 + # 676 + # TPM devices 677 + # 678 + # CONFIG_TCG_TPM is not set 675 679 676 680 # 677 681 # I2C support ··· 713 705 # 714 706 CONFIG_VGA_CONSOLE=y 715 707 CONFIG_DUMMY_CONSOLE=y 716 - # CONFIG_BACKLIGHT_LCD_SUPPORT is not set 717 708 718 709 # 719 710 # Sound ··· 722 715 # 723 716 # USB 
support 724 717 # 718 + CONFIG_USB_ARCH_HAS_HCD=y 719 + CONFIG_USB_ARCH_HAS_OHCI=y 725 720 CONFIG_USB=y 726 721 # CONFIG_USB_DEBUG is not set 727 722 ··· 735 726 # CONFIG_USB_DYNAMIC_MINORS is not set 736 727 # CONFIG_USB_SUSPEND is not set 737 728 # CONFIG_USB_OTG is not set 738 - CONFIG_USB_ARCH_HAS_HCD=y 739 - CONFIG_USB_ARCH_HAS_OHCI=y 740 729 741 730 # 742 731 # USB Host Controller Drivers ··· 743 736 # CONFIG_USB_EHCI_SPLIT_ISO is not set 744 737 # CONFIG_USB_EHCI_ROOT_HUB_TT is not set 745 738 CONFIG_USB_OHCI_HCD=m 739 + # CONFIG_USB_OHCI_BIG_ENDIAN is not set 740 + CONFIG_USB_OHCI_LITTLE_ENDIAN=y 746 741 CONFIG_USB_UHCI_HCD=y 747 742 # CONFIG_USB_SL811_HCD is not set 748 743 ··· 760 751 # 761 752 CONFIG_USB_STORAGE=m 762 753 # CONFIG_USB_STORAGE_DEBUG is not set 763 - # CONFIG_USB_STORAGE_RW_DETECT is not set 764 754 # CONFIG_USB_STORAGE_DATAFAB is not set 765 755 # CONFIG_USB_STORAGE_FREECOM is not set 766 756 # CONFIG_USB_STORAGE_ISD200 is not set 767 757 # CONFIG_USB_STORAGE_DPCM is not set 768 - # CONFIG_USB_STORAGE_HP8200e is not set 758 + # CONFIG_USB_STORAGE_USBAT is not set 769 759 # CONFIG_USB_STORAGE_SDDR09 is not set 770 760 # CONFIG_USB_STORAGE_SDDR55 is not set 771 761 # CONFIG_USB_STORAGE_JUMPSHOT is not set ··· 808 800 # CONFIG_USB_PEGASUS is not set 809 801 # CONFIG_USB_RTL8150 is not set 810 802 # CONFIG_USB_USBNET is not set 803 + # CONFIG_USB_MON is not set 811 804 812 805 # 813 806 # USB port drivers ··· 833 824 # CONFIG_USB_PHIDGETKIT is not set 834 825 # CONFIG_USB_PHIDGETSERVO is not set 835 826 # CONFIG_USB_IDMOUSE is not set 827 + # CONFIG_USB_SISUSBVGA is not set 836 828 # CONFIG_USB_TEST is not set 837 829 838 830 # ··· 877 867 CONFIG_REISERFS_FS_SECURITY=y 878 868 # CONFIG_JFS_FS is not set 879 869 CONFIG_FS_POSIX_ACL=y 870 + 871 + # 872 + # XFS support 873 + # 880 874 CONFIG_XFS_FS=y 875 + CONFIG_XFS_EXPORT=y 881 876 # CONFIG_XFS_RT is not set 882 877 # CONFIG_XFS_QUOTA is not set 883 878 # CONFIG_XFS_SECURITY is not set ··· 960 
945 CONFIG_NFSD_TCP=y 961 946 CONFIG_LOCKD=m 962 947 CONFIG_LOCKD_V4=y 963 - CONFIG_EXPORTFS=m 948 + CONFIG_EXPORTFS=y 964 949 CONFIG_SUNRPC=m 965 950 CONFIG_SUNRPC_GSS=m 966 951 CONFIG_RPCSEC_GSS_KRB5=m ··· 1057 1042 # 1058 1043 # Kernel hacking 1059 1044 # 1045 + # CONFIG_PRINTK_TIME is not set 1060 1046 CONFIG_DEBUG_KERNEL=y 1061 1047 CONFIG_MAGIC_SYSRQ=y 1048 + CONFIG_LOG_BUF_SHIFT=20 1062 1049 # CONFIG_SCHEDSTATS is not set 1063 1050 # CONFIG_DEBUG_SLAB is not set 1064 1051 # CONFIG_DEBUG_SPINLOCK is not set ··· 1094 1077 # CONFIG_CRYPTO_SHA256 is not set 1095 1078 # CONFIG_CRYPTO_SHA512 is not set 1096 1079 # CONFIG_CRYPTO_WP512 is not set 1080 + # CONFIG_CRYPTO_TGR192 is not set 1097 1081 CONFIG_CRYPTO_DES=m 1098 1082 # CONFIG_CRYPTO_BLOWFISH is not set 1099 1083 # CONFIG_CRYPTO_TWOFISH is not set
+4 -30
arch/ia64/hp/common/sba_iommu.c
··· 1944 1944 static void __init 1945 1945 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) 1946 1946 { 1947 - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 1948 - union acpi_object *obj; 1949 - acpi_handle phandle; 1950 1947 unsigned int node; 1948 + int pxm; 1951 1949 1952 1950 ioc->node = MAX_NUMNODES; 1953 1951 1954 - /* 1955 - * Check for a _PXM on this node first. We don't typically see 1956 - * one here, so we'll end up getting it from the parent. 1957 - */ 1958 - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { 1959 - if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) 1960 - return; 1952 + pxm = acpi_get_pxm(handle); 1961 1953 1962 - /* Reset the acpi buffer */ 1963 - buffer.length = ACPI_ALLOCATE_BUFFER; 1964 - buffer.pointer = NULL; 1965 - 1966 - if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, 1967 - &buffer))) 1968 - return; 1969 - } 1970 - 1971 - if (!buffer.length || !buffer.pointer) 1954 + if (pxm < 0) 1972 1955 return; 1973 1956 1974 - obj = buffer.pointer; 1975 - 1976 - if (obj->type != ACPI_TYPE_INTEGER || 1977 - obj->integer.value >= MAX_PXM_DOMAINS) { 1978 - acpi_os_free(buffer.pointer); 1979 - return; 1980 - } 1981 - 1982 - node = pxm_to_nid_map[obj->integer.value]; 1983 - acpi_os_free(buffer.pointer); 1957 + node = pxm_to_nid_map[pxm]; 1984 1958 1985 1959 if (node >= MAX_NUMNODES || !node_online(node)) 1986 1960 return;
+5 -18
arch/ia64/kernel/acpi.c
··· 779 779 union acpi_object *obj; 780 780 struct acpi_table_iosapic *iosapic; 781 781 unsigned int gsi_base; 782 - int node; 782 + int pxm, node; 783 783 784 784 /* Only care about objects w/ a method that returns the MADT */ 785 785 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) ··· 805 805 gsi_base = iosapic->global_irq_base; 806 806 807 807 acpi_os_free(buffer.pointer); 808 - buffer.length = ACPI_ALLOCATE_BUFFER; 809 - buffer.pointer = NULL; 810 808 811 809 /* 812 - * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell 810 + * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell 813 811 * us which node to associate this with. 814 812 */ 815 - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) 813 + pxm = acpi_get_pxm(handle); 814 + if (pxm < 0) 816 815 return AE_OK; 817 816 818 - if (!buffer.length || !buffer.pointer) 819 - return AE_OK; 820 - 821 - obj = buffer.pointer; 822 - 823 - if (obj->type != ACPI_TYPE_INTEGER || 824 - obj->integer.value >= MAX_PXM_DOMAINS) { 825 - acpi_os_free(buffer.pointer); 826 - return AE_OK; 827 - } 828 - 829 - node = pxm_to_nid_map[obj->integer.value]; 830 - acpi_os_free(buffer.pointer); 817 + node = pxm_to_nid_map[pxm]; 831 818 832 819 if (node >= MAX_NUMNODES || !node_online(node) || 833 820 cpus_empty(node_to_cpumask(node)))
+1 -1
arch/ia64/kernel/entry.S
··· 782 782 st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit 783 783 .mem.offset 8,0 784 784 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit 785 - END(ia64_ret_from_ia32_execve_syscall) 785 + END(ia64_ret_from_ia32_execve) 786 786 // fall through 787 787 #endif /* CONFIG_IA32_SUPPORT */ 788 788 GLOBAL_ENTRY(ia64_leave_kernel)
+3 -1
arch/ia64/kernel/fsys.S
··· 611 611 movl r2=ia64_ret_from_syscall 612 612 ;; 613 613 mov rp=r2 // set the real return addr 614 - tbit.z p8,p0=r3,TIF_SYSCALL_TRACE 614 + and r3=_TIF_SYSCALL_TRACEAUDIT,r3 615 615 ;; 616 + cmp.eq p8,p0=r3,r0 617 + 616 618 (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 617 619 (p8) br.call.sptk.many b6=b6 // ignore this return addr 618 620 br.cond.sptk ia64_trace_syscall
+2 -2
arch/ia64/kernel/mca_drv.c
··· 132 132 spin_unlock(&mca_bh_lock); 133 133 134 134 /* This process is about to be killed itself */ 135 - force_sig(SIGKILL, current); 136 - schedule(); 135 + do_exit(SIGKILL); 137 136 } 138 137 139 138 /** ··· 438 439 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; 439 440 psr2->cpl = 0; 440 441 psr2->ri = 0; 442 + psr2->i = 0; 441 443 442 444 return 1; 443 445 }
+15 -3
arch/ia64/kernel/mca_drv_asm.S
··· 10 10 11 11 #include <asm/asmmacro.h> 12 12 #include <asm/processor.h> 13 + #include <asm/ptrace.h> 13 14 14 15 GLOBAL_ENTRY(mca_handler_bhhook) 15 16 invala // clear RSE ? ··· 21 20 ;; 22 21 alloc r16=ar.pfs,0,2,1,0 // make a new frame 23 22 ;; 23 + mov ar.rsc=0 24 + ;; 24 25 mov r13=IA64_KR(CURRENT) // current task pointer 25 26 ;; 26 - adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 27 + mov r2=r13 27 28 ;; 28 - ld8 r12=[r12] // stack pointer 29 + addl r22=IA64_RBS_OFFSET,r2 29 30 ;; 31 + mov ar.bspstore=r22 32 + ;; 33 + addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 34 + ;; 35 + adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 36 + ;; 37 + st1 [r2]=r0 // clear current->thread.on_ustack flag 30 38 mov loc0=r16 31 39 movl loc1=mca_handler_bh // recovery C function 32 40 ;; ··· 44 34 ;; 45 35 mov loc1=rp 46 36 ;; 47 - br.call.sptk.many rp=b6 // not return ... 37 + ssm psr.i 38 + ;; 39 + br.call.sptk.many rp=b6 // does not return ... 48 40 ;; 49 41 mov ar.pfs=loc0 50 42 mov rp=loc1
+33 -10
arch/ia64/kernel/perfmon.c
··· 1265 1265 } 1266 1266 EXPORT_SYMBOL(pfm_unregister_buffer_fmt); 1267 1267 1268 + extern void update_pal_halt_status(int); 1269 + 1268 1270 static int 1269 1271 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) 1270 1272 { ··· 1312 1310 pfm_sessions.pfs_sys_use_dbregs, 1313 1311 is_syswide, 1314 1312 cpu)); 1313 + 1314 + /* 1315 + * disable default_idle() to go to PAL_HALT 1316 + */ 1317 + update_pal_halt_status(0); 1315 1318 1316 1319 UNLOCK_PFS(flags); 1317 1320 ··· 1372 1365 pfm_sessions.pfs_sys_use_dbregs, 1373 1366 is_syswide, 1374 1367 cpu)); 1368 + 1369 + /* 1370 + * if possible, enable default_idle() to go into PAL_HALT 1371 + */ 1372 + if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) 1373 + update_pal_halt_status(1); 1375 1374 1376 1375 UNLOCK_PFS(flags); 1377 1376 ··· 4215 4202 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", 4216 4203 req->load_pid, 4217 4204 ctx->ctx_state)); 4218 - return -EINVAL; 4205 + return -EBUSY; 4219 4206 } 4220 4207 4221 4208 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); ··· 4717 4704 if (task == current || ctx->ctx_fl_system) return 0; 4718 4705 4719 4706 /* 4720 - * if context is UNLOADED we are safe to go 4707 + * we are monitoring another thread 4721 4708 */ 4722 - if (state == PFM_CTX_UNLOADED) return 0; 4723 - 4724 - /* 4725 - * no command can operate on a zombie context 4726 - */ 4727 - if (state == PFM_CTX_ZOMBIE) { 4728 - DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); 4729 - return -EINVAL; 4709 + switch(state) { 4710 + case PFM_CTX_UNLOADED: 4711 + /* 4712 + * if context is UNLOADED we are safe to go 4713 + */ 4714 + return 0; 4715 + case PFM_CTX_ZOMBIE: 4716 + /* 4717 + * no command can operate on a zombie context 4718 + */ 4719 + DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); 4720 + return -EINVAL; 4721 + case PFM_CTX_MASKED: 4722 + /* 4723 + * PMU state has been saved to 
software even though 4724 + * the thread may still be running. 4725 + */ 4726 + if (cmd != PFM_UNLOAD_CONTEXT) return 0; 4730 4727 } 4731 4728 4732 4729 /*
+36 -19
arch/ia64/kernel/process.c
··· 50 50 #include "sigframe.h" 51 51 52 52 void (*ia64_mark_idle)(int); 53 - static cpumask_t cpu_idle_map; 53 + static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 54 54 55 55 unsigned long boot_option_idle_override = 0; 56 56 EXPORT_SYMBOL(boot_option_idle_override); ··· 173 173 ia64_do_signal(oldset, scr, in_syscall); 174 174 } 175 175 176 - static int pal_halt = 1; 176 + static int pal_halt = 1; 177 + static int can_do_pal_halt = 1; 178 + 177 179 static int __init nohalt_setup(char * str) 178 180 { 179 181 pal_halt = 0; ··· 183 181 } 184 182 __setup("nohalt", nohalt_setup); 185 183 184 + void 185 + update_pal_halt_status(int status) 186 + { 187 + can_do_pal_halt = pal_halt && status; 188 + } 189 + 186 190 /* 187 191 * We use this if we don't have any better idle routine.. 188 192 */ 189 193 void 190 194 default_idle (void) 191 195 { 192 - unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP); 193 - 194 196 while (!need_resched()) 195 - if (pal_halt && !pmu_active) 197 + if (can_do_pal_halt) 196 198 safe_halt(); 197 199 else 198 200 cpu_relax(); ··· 229 223 } 230 224 #endif /* CONFIG_HOTPLUG_CPU */ 231 225 232 - 233 226 void cpu_idle_wait(void) 234 227 { 235 - int cpu; 236 - cpumask_t map; 228 + unsigned int cpu, this_cpu = get_cpu(); 229 + cpumask_t map; 237 230 238 - for_each_online_cpu(cpu) 239 - cpu_set(cpu, cpu_idle_map); 231 + set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); 232 + put_cpu(); 240 233 241 - wmb(); 242 - do { 243 - ssleep(1); 244 - cpus_and(map, cpu_idle_map, cpu_online_map); 245 - } while (!cpus_empty(map)); 234 + cpus_clear(map); 235 + for_each_online_cpu(cpu) { 236 + per_cpu(cpu_idle_state, cpu) = 1; 237 + cpu_set(cpu, map); 238 + } 239 + 240 + __get_cpu_var(cpu_idle_state) = 0; 241 + 242 + wmb(); 243 + do { 244 + ssleep(1); 245 + for_each_online_cpu(cpu) { 246 + if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) 247 + cpu_clear(cpu, map); 248 + } 249 + cpus_and(map, map, cpu_online_map); 250 + } 
while (!cpus_empty(map)); 246 251 } 247 252 EXPORT_SYMBOL_GPL(cpu_idle_wait); 248 253 ··· 261 244 cpu_idle (void) 262 245 { 263 246 void (*mark_idle)(int) = ia64_mark_idle; 264 - int cpu = smp_processor_id(); 265 247 266 248 /* endless idle loop with no priority at all */ 267 249 while (1) { ··· 271 255 while (!need_resched()) { 272 256 void (*idle)(void); 273 257 258 + if (__get_cpu_var(cpu_idle_state)) 259 + __get_cpu_var(cpu_idle_state) = 0; 260 + 261 + rmb(); 274 262 if (mark_idle) 275 263 (*mark_idle)(1); 276 264 277 - if (cpu_isset(cpu, cpu_idle_map)) 278 - cpu_clear(cpu, cpu_idle_map); 279 - rmb(); 280 265 idle = pm_idle; 281 266 if (!idle) 282 267 idle = default_idle;
+2 -1
arch/ia64/kernel/signal.c
··· 224 224 * could be corrupted. 225 225 */ 226 226 retval = (long) &ia64_leave_kernel; 227 - if (test_thread_flag(TIF_SYSCALL_TRACE)) 227 + if (test_thread_flag(TIF_SYSCALL_TRACE) 228 + || test_thread_flag(TIF_SYSCALL_AUDIT)) 228 229 /* 229 230 * strace expects to be notified after sigreturn returns even though the 230 231 * context to which we return may not be in the middle of a syscall.
+3 -3
arch/ia64/lib/flush.S
··· 1 1 /* 2 2 * Cache flushing routines. 3 3 * 4 - * Copyright (C) 1999-2001 Hewlett-Packard Co 5 - * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com> 4 + * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co 5 + * David Mosberger-Tang <davidm@hpl.hp.com> 6 6 */ 7 7 #include <asm/asmmacro.h> 8 8 #include <asm/page.h> ··· 26 26 27 27 mov ar.lc=r8 28 28 ;; 29 - .Loop: fc in0 // issuable on M0 only 29 + .Loop: fc.i in0 // issuable on M2 only 30 30 add in0=32,in0 31 31 br.cloop.sptk.few .Loop 32 32 ;;
+1 -1
arch/ia64/lib/memcpy_mck.S
··· 75 75 mov f6=f0 76 76 br.cond.sptk .common_code 77 77 ;; 78 + END(memcpy) 78 79 GLOBAL_ENTRY(__copy_user) 79 80 .prologue 80 81 // check dest alignment ··· 525 524 #undef B 526 525 #undef C 527 526 #undef D 528 - END(memcpy) 529 527 530 528 /* 531 529 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
+1 -1
arch/ia64/lib/memset.S
··· 57 57 { .mmi 58 58 .prologue 59 59 alloc tmp = ar.pfs, 3, 0, 0, 0 60 - .body 61 60 lfetch.nt1 [dest] // 62 61 .save ar.lc, save_lc 63 62 mov.i save_lc = ar.lc 63 + .body 64 64 } { .mmi 65 65 mov ret0 = dest // return value 66 66 cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
+6 -1
arch/ia64/sn/kernel/Makefile
··· 4 4 # License. See the file "COPYING" in the main directory of this archive 5 5 # for more details. 6 6 # 7 - # Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. 7 + # Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved. 8 8 # 9 9 10 10 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ 11 11 huberror.o io_init.o iomv.o klconflib.o sn2/ 12 12 obj-$(CONFIG_IA64_GENERIC) += machvec.o 13 13 obj-$(CONFIG_SGI_TIOCX) += tiocx.o 14 + obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o 15 + xp-y := xp_main.o xp_nofault.o 16 + obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o 17 + xpc-y := xpc_main.o xpc_channel.o xpc_partition.o 18 + obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
+6 -4
arch/ia64/sn/kernel/io_init.c
··· 174 174 if (status) 175 175 continue; 176 176 177 + /* Attach the error interrupt handlers */ 178 + if (nasid & 1) 179 + ice_error_init(hubdev); 180 + else 181 + hub_error_init(hubdev); 182 + 177 183 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) 178 184 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; 179 185 ··· 217 211 sn_flush_device_list; 218 212 } 219 213 220 - if (!(i & 1)) 221 - hub_error_init(hubdev); 222 - else 223 - ice_error_init(hubdev); 224 214 } 225 215 226 216 }
+21 -13
arch/ia64/sn/kernel/mca.c
··· 37 37 * This function is the callback routine that SAL calls to log error 38 38 * info for platform errors. buf is appended to sn_oemdata, resizing as 39 39 * required. 40 + * Note: this is a SAL to OS callback, running under the same rules as the SAL 41 + * code. SAL calls are run with preempt disabled so this routine must not 42 + * sleep. vmalloc can sleep so print_hook cannot resize the output buffer 43 + * itself, instead it must set the required size and return to let the caller 44 + * resize the buffer then redrive the SAL call. 40 45 */ 41 46 static int print_hook(const char *fmt, ...) 42 47 { ··· 52 47 vsnprintf(buf, sizeof(buf), fmt, args); 53 48 va_end(args); 54 49 len = strlen(buf); 55 - while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { 56 - u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); 57 - if (!newbuf) { 58 - printk(KERN_ERR "%s: unable to extend sn_oemdata\n", 59 - __FUNCTION__); 60 - return 0; 61 - } 62 - memcpy(newbuf, *sn_oemdata, *sn_oemdata_size); 63 - vfree(*sn_oemdata); 64 - *sn_oemdata = newbuf; 65 - } 66 - memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1); 50 + if (*sn_oemdata_size + len <= sn_oemdata_bufsize) 51 + memcpy(*sn_oemdata + *sn_oemdata_size, buf, len); 67 52 *sn_oemdata_size += len; 68 53 return 0; 69 54 } ··· 93 98 sn_oemdata = oemdata; 94 99 sn_oemdata_size = oemdata_size; 95 100 sn_oemdata_bufsize = 0; 96 - ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); 101 + *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */ 102 + while (*sn_oemdata_size > sn_oemdata_bufsize) { 103 + u8 *newbuf = vmalloc(*sn_oemdata_size); 104 + if (!newbuf) { 105 + printk(KERN_ERR "%s: unable to extend sn_oemdata\n", 106 + __FUNCTION__); 107 + return 1; 108 + } 109 + vfree(*sn_oemdata); 110 + *sn_oemdata = newbuf; 111 + sn_oemdata_bufsize = *sn_oemdata_size; 112 + *sn_oemdata_size = 0; 113 + ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); 114 + } 97 115 
up(&sn_oemdata_mutex); 98 116 return 0; 99 117 }
+24 -16
arch/ia64/sn/kernel/setup.c
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. 6 + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. 7 7 */ 8 8 9 9 #include <linux/config.h> ··· 72 72 73 73 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); 74 74 EXPORT_PER_CPU_SYMBOL(__sn_hub_info); 75 + 76 + DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); 77 + EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); 78 + 79 + DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); 80 + EXPORT_PER_CPU_SYMBOL(__sn_nodepda); 75 81 76 82 partid_t sn_partid = -1; 77 83 EXPORT_SYMBOL(sn_partid); ··· 379 373 { 380 374 cnodeid_t cnode; 381 375 382 - memset(pda->cnodeid_to_nasid_table, -1, 383 - sizeof(pda->cnodeid_to_nasid_table)); 376 + memset(sn_cnodeid_to_nasid, -1, 377 + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); 384 378 for_each_online_node(cnode) 385 - pda->cnodeid_to_nasid_table[cnode] = 386 - pxm_to_nasid(nid_to_pxm_map[cnode]); 379 + sn_cnodeid_to_nasid[cnode] = 380 + pxm_to_nasid(nid_to_pxm_map[cnode]); 387 381 388 382 numionodes = num_online_nodes(); 389 383 scan_for_ionodes(); ··· 483 477 484 478 cnode = nasid_to_cnodeid(nasid); 485 479 486 - pda->p_nodepda = nodepdaindr[cnode]; 480 + sn_nodepda = nodepdaindr[cnode]; 481 + 487 482 pda->led_address = 488 483 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); 489 484 pda->led_state = LED_ALWAYS_SET; ··· 493 486 pda->idle_flag = 0; 494 487 495 488 if (cpuid != 0) { 496 - memcpy(pda->cnodeid_to_nasid_table, 497 - pdacpu(0)->cnodeid_to_nasid_table, 498 - sizeof(pda->cnodeid_to_nasid_table)); 489 + /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ 490 + memcpy(sn_cnodeid_to_nasid, 491 + (&per_cpu(__sn_cnodeid_to_nasid, 0)), 492 + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); 499 493 } 500 494 501 495 /* 502 496 * Check for WARs. 
503 497 * Only needs to be done once, on BSP. 504 - * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. 498 + * Has to be done after loop above, because it uses this cpu's 499 + * sn_cnodeid_to_nasid table which was just initialized if this 500 + * isn't cpu 0. 505 501 * Has to be done before assignment below. 506 502 */ 507 503 if (!wars_have_been_checked) { ··· 590 580 brd = find_lboard_any(brd, KLTYPE_SNIA); 591 581 592 582 while (brd) { 593 - pda->cnodeid_to_nasid_table[numionodes] = 594 - brd->brd_nasid; 583 + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; 595 584 physical_node_map[brd->brd_nasid] = numionodes; 596 585 root_lboard[numionodes] = brd; 597 586 numionodes++; ··· 611 602 root_lboard[nasid_to_cnodeid(nasid)], 612 603 KLTYPE_TIO); 613 604 while (brd) { 614 - pda->cnodeid_to_nasid_table[numionodes] = 615 - brd->brd_nasid; 605 + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; 616 606 physical_node_map[brd->brd_nasid] = numionodes; 617 607 root_lboard[numionodes] = brd; 618 608 numionodes++; ··· 622 614 brd = find_lboard_any(brd, KLTYPE_TIO); 623 615 } 624 616 } 625 - 626 617 } 627 618 628 619 int ··· 630 623 long cpu; 631 624 632 625 for (cpu=0; cpu < NR_CPUS; cpu++) 633 - if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) 626 + if (cpuid_to_nasid(cpu) == nasid && 627 + cpuid_to_slice(cpu) == slice) 634 628 return cpu; 635 629 636 630 return -1;
+32 -28
arch/ia64/sn/kernel/tiocx.c
··· 21 21 #include <asm/sn/types.h> 22 22 #include <asm/sn/shubio.h> 23 23 #include <asm/sn/tiocx.h> 24 + #include <asm/sn/l1.h> 25 + #include <asm/sn/module.h> 24 26 #include "tio.h" 25 27 #include "xtalk/xwidgetdev.h" 26 28 #include "xtalk/hubdev.h" ··· 310 308 } 311 309 } 312 310 313 - uint64_t 314 - tiocx_dma_addr(uint64_t addr) 311 + uint64_t tiocx_dma_addr(uint64_t addr) 315 312 { 316 313 return PHYS_TO_TIODMA(addr); 317 314 } 318 315 319 - uint64_t 320 - tiocx_swin_base(int nasid) 316 + uint64_t tiocx_swin_base(int nasid) 321 317 { 322 318 return TIO_SWIN_BASE(nasid, TIOCX_CORELET); 323 319 } ··· 329 329 EXPORT_SYMBOL(tiocx_bus_type); 330 330 EXPORT_SYMBOL(tiocx_dma_addr); 331 331 EXPORT_SYMBOL(tiocx_swin_base); 332 - 333 - static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address) 334 - { 335 - 336 - struct ia64_sal_retval ret_stuff; 337 - ret_stuff.status = 0; 338 - ret_stuff.v0 = 0; 339 - 340 - ia64_sal_oemcall_nolock(&ret_stuff, 341 - SN_SAL_IOIF_GET_HUBDEV_INFO, 342 - handle, address, 0, 0, 0, 0, 0); 343 - return ret_stuff.v0; 344 - } 345 332 346 333 static void tio_conveyor_set(nasid_t nasid, int enable_flag) 347 334 { ··· 366 379 udelay(2000); 367 380 } 368 381 369 - static int fpga_attached(nasid_t nasid) 382 + static int tiocx_btchar_get(int nasid) 383 + { 384 + moduleid_t module_id; 385 + geoid_t geoid; 386 + int cnodeid; 387 + 388 + cnodeid = nasid_to_cnodeid(nasid); 389 + geoid = cnodeid_get_geoid(cnodeid); 390 + module_id = geo_module(geoid); 391 + return MODULE_GET_BTCHAR(module_id); 392 + } 393 + 394 + static int is_fpga_brick(int nasid) 395 + { 396 + switch (tiocx_btchar_get(nasid)) { 397 + case L1_BRICKTYPE_SA: 398 + case L1_BRICKTYPE_ATHENA: 399 + return 1; 400 + } 401 + return 0; 402 + } 403 + 404 + static int bitstream_loaded(nasid_t nasid) 370 405 { 371 406 uint64_t cx_credits; 372 407 ··· 405 396 int mfg_num = CX_DEV_NONE; 406 397 nasid_t nasid = cx_dev->cx_id.nasid; 407 398 408 - if (fpga_attached(nasid)) { 399 + if 
(bitstream_loaded(nasid)) { 409 400 uint64_t cx_id; 410 401 411 402 cx_id = ··· 436 427 { 437 428 struct cx_dev *cx_dev = to_cx_dev(dev); 438 429 439 - return sprintf(buf, "0x%x 0x%x 0x%x\n", 430 + return sprintf(buf, "0x%x 0x%x 0x%x %d\n", 440 431 cx_dev->cx_id.nasid, 441 - cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num); 432 + cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num, 433 + tiocx_btchar_get(cx_dev->cx_id.nasid)); 442 434 } 443 435 444 436 static ssize_t store_cxdev_control(struct device *dev, const char *buf, ··· 485 475 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) 486 476 break; /* No more nasids .. bail out of loop */ 487 477 488 - if (nasid & 0x1) { /* TIO's are always odd */ 478 + if ((nasid & 0x1) && is_fpga_brick(nasid)) { 489 479 struct hubdev_info *hubdev; 490 - uint64_t status; 491 480 struct xwidget_info *widgetp; 492 481 493 482 DBG("Found TIO at nasid 0x%x\n", nasid); 494 483 495 484 hubdev = 496 485 (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); 497 - status = 498 - tiocx_get_hubdev_info(nasid, 499 - (uint64_t) __pa(hubdev)); 500 - if (status) 501 - continue; 502 486 503 487 widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; 504 488
+289
arch/ia64/sn/kernel/xp_main.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 7 + */ 8 + 9 + 10 + /* 11 + * Cross Partition (XP) base. 12 + * 13 + * XP provides a base from which its users can interact 14 + * with XPC, yet not be dependent on XPC. 15 + * 16 + */ 17 + 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/module.h> 22 + #include <asm/sn/intr.h> 23 + #include <asm/sn/sn_sal.h> 24 + #include <asm/sn/xp.h> 25 + 26 + 27 + /* 28 + * Target of nofault PIO read. 29 + */ 30 + u64 xp_nofault_PIOR_target; 31 + 32 + 33 + /* 34 + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level 35 + * users of XPC. 36 + */ 37 + struct xpc_registration xpc_registrations[XPC_NCHANNELS]; 38 + 39 + 40 + /* 41 + * Initialize the XPC interface to indicate that XPC isn't loaded. 42 + */ 43 + static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } 44 + 45 + struct xpc_interface xpc_interface = { 46 + (void (*)(int)) xpc_notloaded, 47 + (void (*)(int)) xpc_notloaded, 48 + (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, 49 + (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, 50 + (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) 51 + xpc_notloaded, 52 + (void (*)(partid_t, int, void *)) xpc_notloaded, 53 + (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded 54 + }; 55 + 56 + 57 + /* 58 + * XPC calls this when it (the XPC module) has been loaded. 
59 + */ 60 + void 61 + xpc_set_interface(void (*connect)(int), 62 + void (*disconnect)(int), 63 + enum xpc_retval (*allocate)(partid_t, int, u32, void **), 64 + enum xpc_retval (*send)(partid_t, int, void *), 65 + enum xpc_retval (*send_notify)(partid_t, int, void *, 66 + xpc_notify_func, void *), 67 + void (*received)(partid_t, int, void *), 68 + enum xpc_retval (*partid_to_nasids)(partid_t, void *)) 69 + { 70 + xpc_interface.connect = connect; 71 + xpc_interface.disconnect = disconnect; 72 + xpc_interface.allocate = allocate; 73 + xpc_interface.send = send; 74 + xpc_interface.send_notify = send_notify; 75 + xpc_interface.received = received; 76 + xpc_interface.partid_to_nasids = partid_to_nasids; 77 + } 78 + 79 + 80 + /* 81 + * XPC calls this when it (the XPC module) is being unloaded. 82 + */ 83 + void 84 + xpc_clear_interface(void) 85 + { 86 + xpc_interface.connect = (void (*)(int)) xpc_notloaded; 87 + xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; 88 + xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, 89 + void **)) xpc_notloaded; 90 + xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) 91 + xpc_notloaded; 92 + xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, 93 + xpc_notify_func, void *)) xpc_notloaded; 94 + xpc_interface.received = (void (*)(partid_t, int, void *)) 95 + xpc_notloaded; 96 + xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) 97 + xpc_notloaded; 98 + } 99 + 100 + 101 + /* 102 + * Register for automatic establishment of a channel connection whenever 103 + * a partition comes up. 104 + * 105 + * Arguments: 106 + * 107 + * ch_number - channel # to register for connection. 108 + * func - function to call for asynchronous notification of channel 109 + * state changes (i.e., connection, disconnection, error) and 110 + * the arrival of incoming messages. 
111 + * key - pointer to optional user-defined value that gets passed back 112 + * to the user on any callouts made to func. 113 + * payload_size - size in bytes of the XPC message's payload area which 114 + * contains a user-defined message. The user should make 115 + * this large enough to hold their largest message. 116 + * nentries - max #of XPC message entries a message queue can contain. 117 + * The actual number, which is determined when a connection 118 + * is established and may be less then requested, will be 119 + * passed to the user via the xpcConnected callout. 120 + * assigned_limit - max number of kthreads allowed to be processing 121 + * messages (per connection) at any given instant. 122 + * idle_limit - max number of kthreads allowed to be idle at any given 123 + * instant. 124 + */ 125 + enum xpc_retval 126 + xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, 127 + u16 nentries, u32 assigned_limit, u32 idle_limit) 128 + { 129 + struct xpc_registration *registration; 130 + 131 + 132 + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 133 + DBUG_ON(payload_size == 0 || nentries == 0); 134 + DBUG_ON(func == NULL); 135 + DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); 136 + 137 + registration = &xpc_registrations[ch_number]; 138 + 139 + if (down_interruptible(&registration->sema) != 0) { 140 + return xpcInterrupted; 141 + } 142 + 143 + /* if XPC_CHANNEL_REGISTERED(ch_number) */ 144 + if (registration->func != NULL) { 145 + up(&registration->sema); 146 + return xpcAlreadyRegistered; 147 + } 148 + 149 + /* register the channel for connection */ 150 + registration->msg_size = XPC_MSG_SIZE(payload_size); 151 + registration->nentries = nentries; 152 + registration->assigned_limit = assigned_limit; 153 + registration->idle_limit = idle_limit; 154 + registration->key = key; 155 + registration->func = func; 156 + 157 + up(&registration->sema); 158 + 159 + xpc_interface.connect(ch_number); 160 + 161 + return 
xpcSuccess; 162 + } 163 + 164 + 165 + /* 166 + * Remove the registration for automatic connection of the specified channel 167 + * when a partition comes up. 168 + * 169 + * Before returning this xpc_disconnect() will wait for all connections on the 170 + * specified channel have been closed/torndown. So the caller can be assured 171 + * that they will not be receiving any more callouts from XPC to their 172 + * function registered via xpc_connect(). 173 + * 174 + * Arguments: 175 + * 176 + * ch_number - channel # to unregister. 177 + */ 178 + void 179 + xpc_disconnect(int ch_number) 180 + { 181 + struct xpc_registration *registration; 182 + 183 + 184 + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 185 + 186 + registration = &xpc_registrations[ch_number]; 187 + 188 + /* 189 + * We've decided not to make this a down_interruptible(), since we 190 + * figured XPC's users will just turn around and call xpc_disconnect() 191 + * again anyways, so we might as well wait, if need be. 192 + */ 193 + down(&registration->sema); 194 + 195 + /* if !XPC_CHANNEL_REGISTERED(ch_number) */ 196 + if (registration->func == NULL) { 197 + up(&registration->sema); 198 + return; 199 + } 200 + 201 + /* remove the connection registration for the specified channel */ 202 + registration->func = NULL; 203 + registration->key = NULL; 204 + registration->nentries = 0; 205 + registration->msg_size = 0; 206 + registration->assigned_limit = 0; 207 + registration->idle_limit = 0; 208 + 209 + xpc_interface.disconnect(ch_number); 210 + 211 + up(&registration->sema); 212 + 213 + return; 214 + } 215 + 216 + 217 + int __init 218 + xp_init(void) 219 + { 220 + int ret, ch_number; 221 + u64 func_addr = *(u64 *) xp_nofault_PIOR; 222 + u64 err_func_addr = *(u64 *) xp_error_PIOR; 223 + 224 + 225 + if (!ia64_platform_is("sn2")) { 226 + return -ENODEV; 227 + } 228 + 229 + /* 230 + * Register a nofault code region which performs a cross-partition 231 + * PIO read. 
If the PIO read times out, the MCA handler will consume 232 + * the error and return to a kernel-provided instruction to indicate 233 + * an error. This PIO read exists because it is guaranteed to timeout 234 + * if the destination is down (AMO operations do not timeout on at 235 + * least some CPUs on Shubs <= v1.2, which unfortunately we have to 236 + * work around). 237 + */ 238 + if ((ret = sn_register_nofault_code(func_addr, err_func_addr, 239 + err_func_addr, 1, 1)) != 0) { 240 + printk(KERN_ERR "XP: can't register nofault code, error=%d\n", 241 + ret); 242 + } 243 + /* 244 + * Setup the nofault PIO read target. (There is no special reason why 245 + * SH_IPI_ACCESS was selected.) 246 + */ 247 + if (is_shub2()) { 248 + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; 249 + } else { 250 + xp_nofault_PIOR_target = SH1_IPI_ACCESS; 251 + } 252 + 253 + /* initialize the connection registration semaphores */ 254 + for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { 255 + sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ 256 + } 257 + 258 + return 0; 259 + } 260 + module_init(xp_init); 261 + 262 + 263 + void __exit 264 + xp_exit(void) 265 + { 266 + u64 func_addr = *(u64 *) xp_nofault_PIOR; 267 + u64 err_func_addr = *(u64 *) xp_error_PIOR; 268 + 269 + 270 + /* unregister the PIO read nofault code region */ 271 + (void) sn_register_nofault_code(func_addr, err_func_addr, 272 + err_func_addr, 1, 0); 273 + } 274 + module_exit(xp_exit); 275 + 276 + 277 + MODULE_AUTHOR("Silicon Graphics, Inc."); 278 + MODULE_DESCRIPTION("Cross Partition (XP) base"); 279 + MODULE_LICENSE("GPL"); 280 + 281 + EXPORT_SYMBOL(xp_nofault_PIOR); 282 + EXPORT_SYMBOL(xp_nofault_PIOR_target); 283 + EXPORT_SYMBOL(xpc_registrations); 284 + EXPORT_SYMBOL(xpc_interface); 285 + EXPORT_SYMBOL(xpc_clear_interface); 286 + EXPORT_SYMBOL(xpc_set_interface); 287 + EXPORT_SYMBOL(xpc_connect); 288 + EXPORT_SYMBOL(xpc_disconnect); 289 +
+31
arch/ia64/sn/kernel/xp_nofault.S
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 7 + */ 8 + 9 + 10 + /* 11 + * The xp_nofault_PIOR function takes a pointer to a remote PIO register 12 + * and attempts to load and consume a value from it. This function 13 + * will be registered as a nofault code block. In the event that the 14 + * PIO read fails, the MCA handler will force the error to look 15 + * corrected and vector to the xp_error_PIOR which will return an error. 16 + * 17 + * extern int xp_nofault_PIOR(void *remote_register); 18 + */ 19 + 20 + .global xp_nofault_PIOR 21 + xp_nofault_PIOR: 22 + mov r8=r0 // Stage a success return value 23 + ld8.acq r9=[r32];; // PIO Read the specified register 24 + adds r9=1,r9 // Add to force a consume 25 + br.ret.sptk.many b0;; // Return success 26 + 27 + .global xp_error_PIOR 28 + xp_error_PIOR: 29 + mov r8=1 // Return value of 1 30 + br.ret.sptk.many b0;; // Return failure 31 +
+991
arch/ia64/sn/kernel/xpc.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 7 + */ 8 + 9 + 10 + /* 11 + * Cross Partition Communication (XPC) structures and macros. 12 + */ 13 + 14 + #ifndef _IA64_SN_KERNEL_XPC_H 15 + #define _IA64_SN_KERNEL_XPC_H 16 + 17 + 18 + #include <linux/config.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/sysctl.h> 21 + #include <linux/device.h> 22 + #include <asm/pgtable.h> 23 + #include <asm/processor.h> 24 + #include <asm/sn/bte.h> 25 + #include <asm/sn/clksupport.h> 26 + #include <asm/sn/addrs.h> 27 + #include <asm/sn/mspec.h> 28 + #include <asm/sn/shub_mmr.h> 29 + #include <asm/sn/xp.h> 30 + 31 + 32 + /* 33 + * XPC Version numbers consist of a major and minor number. XPC can always 34 + * talk to versions with same major #, and never talk to versions with a 35 + * different major #. 36 + */ 37 + #define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) 38 + #define XPC_VERSION_MAJOR(_v) ((_v) >> 4) 39 + #define XPC_VERSION_MINOR(_v) ((_v) & 0xf) 40 + 41 + 42 + /* 43 + * The next macros define word or bit representations for given 44 + * C-brick nasid in either the SAL provided bit array representing 45 + * nasids in the partition/machine or the AMO_t array used for 46 + * inter-partition initiation communications. 47 + * 48 + * For SN2 machines, C-Bricks are alway even numbered NASIDs. As 49 + * such, some space will be saved by insisting that nasid information 50 + * passed from SAL always be packed for C-Bricks and the 51 + * cross-partition interrupts use the same packing scheme. 
52 + */ 53 + #define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) 54 + #define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) 55 + #define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ 56 + (1UL << XPC_NASID_B_INDEX(_n))) 57 + #define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) 58 + 59 + #define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ 60 + #define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */ 61 + 62 + /* define the process name of HB checker and the CPU it is pinned to */ 63 + #define XPC_HB_CHECK_THREAD_NAME "xpc_hb" 64 + #define XPC_HB_CHECK_CPU 0 65 + 66 + /* define the process name of the discovery thread */ 67 + #define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" 68 + 69 + 70 + #define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p))) 71 + #define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p)) 72 + #define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p))) 73 + 74 + 75 + /* 76 + * Reserved Page provided by SAL. 77 + * 78 + * SAL provides one page per partition of reserved memory. When SAL 79 + * initialization is complete, SAL_signature, SAL_version, partid, 80 + * part_nasids, and mach_nasids are set. 81 + * 82 + * Note: Until vars_pa is set, the partition XPC code has not been initialized. 
83 + */ 84 + struct xpc_rsvd_page { 85 + u64 SAL_signature; /* SAL unique signature */ 86 + u64 SAL_version; /* SAL specified version */ 87 + u8 partid; /* partition ID from SAL */ 88 + u8 version; 89 + u8 pad[6]; /* pad to u64 align */ 90 + u64 vars_pa; 91 + u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; 92 + u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; 93 + }; 94 + #define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */ 95 + 96 + #define XPC_RSVD_PAGE_ALIGNED_SIZE \ 97 + (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))) 98 + 99 + 100 + /* 101 + * Define the structures by which XPC variables can be exported to other 102 + * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) 103 + */ 104 + 105 + /* 106 + * The following structure describes the partition generic variables 107 + * needed by other partitions in order to properly initialize. 108 + * 109 + * struct xpc_vars version number also applies to struct xpc_vars_part. 110 + * Changes to either structure and/or related functionality should be 111 + * reflected by incrementing either the major or minor version numbers 112 + * of struct xpc_vars. 113 + */ 114 + struct xpc_vars { 115 + u8 version; 116 + u64 heartbeat; 117 + u64 heartbeating_to_mask; 118 + u64 kdb_status; /* 0 = machine running */ 119 + int act_nasid; 120 + int act_phys_cpuid; 121 + u64 vars_part_pa; 122 + u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ 123 + AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ 124 + AMO_t *act_amos; /* pointer to the first activation AMO */ 125 + }; 126 + #define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */ 127 + 128 + #define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars))) 129 + 130 + /* 131 + * The following structure describes the per partition specific variables. 132 + * 133 + * An array of these structures, one per partition, will be defined. 
As a 134 + * partition becomes active XPC will copy the array entry corresponding to 135 + * itself from that partition. It is desirable that the size of this 136 + * structure evenly divide into a cacheline, such that none of the entries 137 + * in this array crosses a cacheline boundary. As it is now, each entry 138 + * occupies half a cacheline. 139 + */ 140 + struct xpc_vars_part { 141 + u64 magic; 142 + 143 + u64 openclose_args_pa; /* physical address of open and close args */ 144 + u64 GPs_pa; /* physical address of Get/Put values */ 145 + 146 + u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ 147 + int IPI_nasid; /* nasid of where to send IPIs */ 148 + int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */ 149 + 150 + u8 nchannels; /* #of defined channels supported */ 151 + 152 + u8 reserved[23]; /* pad to a full 64 bytes */ 153 + }; 154 + 155 + /* 156 + * The vars_part MAGIC numbers play a part in the first contact protocol. 157 + * 158 + * MAGIC1 indicates that the per partition specific variables for a remote 159 + * partition have been initialized by this partition. 160 + * 161 + * MAGIC2 indicates that this partition has pulled the remote partititions 162 + * per partition variables that pertain to this partition. 163 + */ 164 + #define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 165 + #define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 166 + 167 + 168 + 169 + /* 170 + * Functions registered by add_timer() or called by kernel_thread() only 171 + * allow for a single 64-bit argument. The following macros can be used to 172 + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from 173 + * the passed argument. 
174 + */ 175 + #define XPC_PACK_ARGS(_arg1, _arg2) \ 176 + ((((u64) _arg1) & 0xffffffff) | \ 177 + ((((u64) _arg2) & 0xffffffff) << 32)) 178 + 179 + #define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) 180 + #define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) 181 + 182 + 183 + 184 + /* 185 + * Define a Get/Put value pair (pointers) used with a message queue. 186 + */ 187 + struct xpc_gp { 188 + s64 get; /* Get value */ 189 + s64 put; /* Put value */ 190 + }; 191 + 192 + #define XPC_GP_SIZE \ 193 + L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) 194 + 195 + 196 + 197 + /* 198 + * Define a structure that contains arguments associated with opening and 199 + * closing a channel. 200 + */ 201 + struct xpc_openclose_args { 202 + u16 reason; /* reason why channel is closing */ 203 + u16 msg_size; /* sizeof each message entry */ 204 + u16 remote_nentries; /* #of message entries in remote msg queue */ 205 + u16 local_nentries; /* #of message entries in local msg queue */ 206 + u64 local_msgqueue_pa; /* physical address of local message queue */ 207 + }; 208 + 209 + #define XPC_OPENCLOSE_ARGS_SIZE \ 210 + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) 211 + 212 + 213 + 214 + /* struct xpc_msg flags */ 215 + 216 + #define XPC_M_DONE 0x01 /* msg has been received/consumed */ 217 + #define XPC_M_READY 0x02 /* msg is ready to be sent */ 218 + #define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ 219 + 220 + 221 + #define XPC_MSG_ADDRESS(_payload) \ 222 + ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) 223 + 224 + 225 + 226 + /* 227 + * Defines notify entry. 228 + * 229 + * This is used to notify a message's sender that their message was received 230 + * and consumed by the intended recipient. 
231 + */ 232 + struct xpc_notify { 233 + struct semaphore sema; /* notify semaphore */ 234 + u8 type; /* type of notification */ 235 + 236 + /* the following two fields are only used if type == XPC_N_CALL */ 237 + xpc_notify_func func; /* user's notify function */ 238 + void *key; /* pointer to user's key */ 239 + }; 240 + 241 + /* struct xpc_notify type of notification */ 242 + 243 + #define XPC_N_CALL 0x01 /* notify function provided by user */ 244 + 245 + 246 + 247 + /* 248 + * Define the structure that manages all the stuff required by a channel. In 249 + * particular, they are used to manage the messages sent across the channel. 250 + * 251 + * This structure is private to a partition, and is NOT shared across the 252 + * partition boundary. 253 + * 254 + * There is an array of these structures for each remote partition. It is 255 + * allocated at the time a partition becomes active. The array contains one 256 + * of these structures for each potential channel connection to that partition. 257 + * 258 + * Each of these structures manages two message queues (circular buffers). 259 + * They are allocated at the time a channel connection is made. One of 260 + * these message queues (local_msgqueue) holds the locally created messages 261 + * that are destined for the remote partition. The other of these message 262 + * queues (remote_msgqueue) is a locally cached copy of the remote partition's 263 + * own local_msgqueue. 264 + * 265 + * The following is a description of the Get/Put pointers used to manage these 266 + * two message queues. Consider the local_msgqueue to be on one partition 267 + * and the remote_msgqueue to be its cached copy on another partition. A 268 + * description of what each of the lettered areas contains is included. 
269 + * 270 + * 271 + * local_msgqueue remote_msgqueue 272 + * 273 + * |/////////| |/////////| 274 + * w_remote_GP.get --> +---------+ |/////////| 275 + * | F | |/////////| 276 + * remote_GP.get --> +---------+ +---------+ <-- local_GP->get 277 + * | | | | 278 + * | | | E | 279 + * | | | | 280 + * | | +---------+ <-- w_local_GP.get 281 + * | B | |/////////| 282 + * | | |////D////| 283 + * | | |/////////| 284 + * | | +---------+ <-- w_remote_GP.put 285 + * | | |////C////| 286 + * local_GP->put --> +---------+ +---------+ <-- remote_GP.put 287 + * | | |/////////| 288 + * | A | |/////////| 289 + * | | |/////////| 290 + * w_local_GP.put --> +---------+ |/////////| 291 + * |/////////| |/////////| 292 + * 293 + * 294 + * ( remote_GP.[get|put] are cached copies of the remote 295 + * partition's local_GP->[get|put], and thus their values can 296 + * lag behind their counterparts on the remote partition. ) 297 + * 298 + * 299 + * A - Messages that have been allocated, but have not yet been sent to the 300 + * remote partition. 301 + * 302 + * B - Messages that have been sent, but have not yet been acknowledged by the 303 + * remote partition as having been received. 304 + * 305 + * C - Area that needs to be prepared for the copying of sent messages, by 306 + * the clearing of the message flags of any previously received messages. 307 + * 308 + * D - Area into which sent messages are to be copied from the remote 309 + * partition's local_msgqueue and then delivered to their intended 310 + * recipients. [ To allow for a multi-message copy, another pointer 311 + * (next_msg_to_pull) has been added to keep track of the next message 312 + * number needing to be copied (pulled). It chases after w_remote_GP.put. 313 + * Any messages lying between w_local_GP.get and next_msg_to_pull have 314 + * been copied and are ready to be delivered. 
] 315 + * 316 + * E - Messages that have been copied and delivered, but have not yet been 317 + * acknowledged by the recipient as having been received. 318 + * 319 + * F - Messages that have been acknowledged, but XPC has not yet notified the 320 + * sender that the message was received by its intended recipient. 321 + * This is also an area that needs to be prepared for the allocating of 322 + * new messages, by the clearing of the message flags of the acknowledged 323 + * messages. 324 + */ 325 + struct xpc_channel { 326 + partid_t partid; /* ID of remote partition connected */ 327 + spinlock_t lock; /* lock for updating this structure */ 328 + u32 flags; /* general flags */ 329 + 330 + enum xpc_retval reason; /* reason why channel is disconnect'g */ 331 + int reason_line; /* line# disconnect initiated from */ 332 + 333 + u16 number; /* channel # */ 334 + 335 + u16 msg_size; /* sizeof each msg entry */ 336 + u16 local_nentries; /* #of msg entries in local msg queue */ 337 + u16 remote_nentries; /* #of msg entries in remote msg queue*/ 338 + 339 + void *local_msgqueue_base; /* base address of kmalloc'd space */ 340 + struct xpc_msg *local_msgqueue; /* local message queue */ 341 + void *remote_msgqueue_base; /* base address of kmalloc'd space */ 342 + struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ 343 + /* local message queue */ 344 + u64 remote_msgqueue_pa; /* phys addr of remote partition's */ 345 + /* local message queue */ 346 + 347 + atomic_t references; /* #of external references to queues */ 348 + 349 + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 350 + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 351 + 352 + /* queue of msg senders who want to be notified when msg received */ 353 + 354 + atomic_t n_to_notify; /* #of msg senders to notify */ 355 + struct xpc_notify *notify_queue;/* notify queue for messages sent */ 356 + 357 + xpc_channel_func func; /* user's channel function */ 358 + void 
*key; /* pointer to user's key */ 359 + 360 + struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ 361 + struct semaphore teardown_sema; /* wait for teardown completion */ 362 + 363 + struct xpc_openclose_args *local_openclose_args; /* args passed on */ 364 + /* opening or closing of channel */ 365 + 366 + /* various flavors of local and remote Get/Put values */ 367 + 368 + struct xpc_gp *local_GP; /* local Get/Put values */ 369 + struct xpc_gp remote_GP; /* remote Get/Put values */ 370 + struct xpc_gp w_local_GP; /* working local Get/Put values */ 371 + struct xpc_gp w_remote_GP; /* working remote Get/Put values */ 372 + s64 next_msg_to_pull; /* Put value of next msg to pull */ 373 + 374 + /* kthread management related fields */ 375 + 376 + // >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps 377 + // >>> allow the assigned limit be unbounded and let the idle limit be dynamic 378 + // >>> dependent on activity over the last interval of time 379 + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 380 + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ 381 + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ 382 + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ 383 + atomic_t kthreads_active; /* #of kthreads actively working */ 384 + // >>> following field is temporary 385 + u32 kthreads_created; /* total #of kthreads created */ 386 + 387 + wait_queue_head_t idle_wq; /* idle kthread wait queue */ 388 + 389 + } ____cacheline_aligned; 390 + 391 + 392 + /* struct xpc_channel flags */ 393 + 394 + #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ 395 + 396 + #define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ 397 + #define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ 398 + #define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ 399 + #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 400 + 
401 + #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 402 + #define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ 403 + #define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ 404 + #define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ 405 + 406 + #define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ 407 + #define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ 408 + #define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ 409 + #define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ 410 + 411 + #define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ 412 + #define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ 413 + 414 + 415 + 416 + /* 417 + * Manages channels on a partition basis. There is one of these structures 418 + * for each partition (a partition will never utilize the structure that 419 + * represents itself). 420 + */ 421 + struct xpc_partition { 422 + 423 + /* XPC HB infrastructure */ 424 + 425 + u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 426 + u64 remote_vars_pa; /* phys addr of partition's vars */ 427 + u64 remote_vars_part_pa; /* phys addr of partition's vars part */ 428 + u64 last_heartbeat; /* HB at last read */ 429 + u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 430 + int remote_act_nasid; /* active part's act/deact nasid */ 431 + int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ 432 + u32 act_IRQ_rcvd; /* IRQs since activation */ 433 + spinlock_t act_lock; /* protect updating of act_state */ 434 + u8 act_state; /* from XPC HB viewpoint */ 435 + enum xpc_retval reason; /* reason partition is deactivating */ 436 + int reason_line; /* line# deactivation initiated from */ 437 + int reactivate_nasid; /* nasid in partition to reactivate */ 438 + 439 + 440 + /* XPC infrastructure referencing and teardown control */ 441 + 442 + u8 setup_state; 
/* infrastructure setup state */ 443 + wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 444 + atomic_t references; /* #of references to infrastructure */ 445 + 446 + 447 + /* 448 + * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN 449 + * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION 450 + * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE 451 + * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 452 + */ 453 + 454 + 455 + u8 nchannels; /* #of defined channels supported */ 456 + atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 457 + struct xpc_channel *channels;/* array of channel structures */ 458 + 459 + void *local_GPs_base; /* base address of kmalloc'd space */ 460 + struct xpc_gp *local_GPs; /* local Get/Put values */ 461 + void *remote_GPs_base; /* base address of kmalloc'd space */ 462 + struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ 463 + /* values */ 464 + u64 remote_GPs_pa; /* phys address of remote partition's local */ 465 + /* Get/Put values */ 466 + 467 + 468 + /* fields used to pass args when opening or closing a channel */ 469 + 470 + void *local_openclose_args_base; /* base address of kmalloc'd space */ 471 + struct xpc_openclose_args *local_openclose_args; /* local's args */ 472 + void *remote_openclose_args_base; /* base address of kmalloc'd space */ 473 + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 474 + /* args */ 475 + u64 remote_openclose_args_pa; /* phys addr of remote's args */ 476 + 477 + 478 + /* IPI sending, receiving and handling related fields */ 479 + 480 + int remote_IPI_nasid; /* nasid of where to send IPIs */ 481 + int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ 482 + AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ 483 + 484 + AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ 485 + u64 local_IPI_amo; /* IPI 
amo flags yet to be handled */ 486 + char IPI_owner[8]; /* IPI owner's name */ 487 + struct timer_list dropped_IPI_timer; /* dropped IPI timer */ 488 + 489 + spinlock_t IPI_lock; /* IPI handler lock */ 490 + 491 + 492 + /* channel manager related fields */ 493 + 494 + atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 495 + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 496 + 497 + } ____cacheline_aligned; 498 + 499 + 500 + /* struct xpc_partition act_state values (for XPC HB) */ 501 + 502 + #define XPC_P_INACTIVE 0x00 /* partition is not active */ 503 + #define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ 504 + #define XPC_P_ACTIVATING 0x02 /* activation thread started */ 505 + #define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 506 + #define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 507 + 508 + 509 + #define XPC_DEACTIVATE_PARTITION(_p, _reason) \ 510 + xpc_deactivate_partition(__LINE__, (_p), (_reason)) 511 + 512 + 513 + /* struct xpc_partition setup_state values */ 514 + 515 + #define XPC_P_UNSET 0x00 /* infrastructure was never setup */ 516 + #define XPC_P_SETUP 0x01 /* infrastructure is setup */ 517 + #define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 518 + #define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 519 + 520 + 521 + /* 522 + * struct xpc_partition IPI_timer #of seconds to wait before checking for 523 + * dropped IPIs. These occur whenever an IPI amo write doesn't complete until 524 + * after the IPI was received. 
525 + */ 526 + #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 527 + 528 + 529 + #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) 530 + 531 + 532 + 533 + /* found in xp_main.c */ 534 + extern struct xpc_registration xpc_registrations[]; 535 + 536 + 537 + /* >>> found in xpc_main.c only */ 538 + extern struct device *xpc_part; 539 + extern struct device *xpc_chan; 540 + extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *); 541 + extern void xpc_dropped_IPI_check(struct xpc_partition *); 542 + extern void xpc_activate_kthreads(struct xpc_channel *, int); 543 + extern void xpc_create_kthreads(struct xpc_channel *, int); 544 + extern void xpc_disconnect_wait(int); 545 + 546 + 547 + /* found in xpc_main.c and efi-xpc.c */ 548 + extern void xpc_activate_partition(struct xpc_partition *); 549 + 550 + 551 + /* found in xpc_partition.c */ 552 + extern int xpc_exiting; 553 + extern int xpc_hb_interval; 554 + extern int xpc_hb_check_interval; 555 + extern struct xpc_vars *xpc_vars; 556 + extern struct xpc_rsvd_page *xpc_rsvd_page; 557 + extern struct xpc_vars_part *xpc_vars_part; 558 + extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 559 + extern char xpc_remote_copy_buffer[]; 560 + extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); 561 + extern void xpc_allow_IPI_ops(void); 562 + extern void xpc_restrict_IPI_ops(void); 563 + extern int xpc_identify_act_IRQ_sender(void); 564 + extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); 565 + extern void xpc_mark_partition_inactive(struct xpc_partition *); 566 + extern void xpc_discovery(void); 567 + extern void xpc_check_remote_hb(void); 568 + extern void xpc_deactivate_partition(const int, struct xpc_partition *, 569 + enum xpc_retval); 570 + extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); 571 + 572 + 573 + /* found in xpc_channel.c */ 574 + extern void xpc_initiate_connect(int); 575 + extern void xpc_initiate_disconnect(int); 576 + 
extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); 577 + extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); 578 + extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, 579 + xpc_notify_func, void *); 580 + extern void xpc_initiate_received(partid_t, int, void *); 581 + extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); 582 + extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); 583 + extern void xpc_process_channel_activity(struct xpc_partition *); 584 + extern void xpc_connected_callout(struct xpc_channel *); 585 + extern void xpc_deliver_msg(struct xpc_channel *); 586 + extern void xpc_disconnect_channel(const int, struct xpc_channel *, 587 + enum xpc_retval, unsigned long *); 588 + extern void xpc_disconnected_callout(struct xpc_channel *); 589 + extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval); 590 + extern void xpc_teardown_infrastructure(struct xpc_partition *); 591 + 592 + 593 + 594 + static inline void 595 + xpc_wakeup_channel_mgr(struct xpc_partition *part) 596 + { 597 + if (atomic_inc_return(&part->channel_mgr_requests) == 1) { 598 + wake_up(&part->channel_mgr_wq); 599 + } 600 + } 601 + 602 + 603 + 604 + /* 605 + * These next two inlines are used to keep us from tearing down a channel's 606 + * msg queues while a thread may be referencing them. 
607 + */ 608 + static inline void 609 + xpc_msgqueue_ref(struct xpc_channel *ch) 610 + { 611 + atomic_inc(&ch->references); 612 + } 613 + 614 + static inline void 615 + xpc_msgqueue_deref(struct xpc_channel *ch) 616 + { 617 + s32 refs = atomic_dec_return(&ch->references); 618 + 619 + DBUG_ON(refs < 0); 620 + if (refs == 0) { 621 + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); 622 + } 623 + } 624 + 625 + 626 + 627 + #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ 628 + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) 629 + 630 + 631 + /* 632 + * These two inlines are used to keep us from tearing down a partition's 633 + * setup infrastructure while a thread may be referencing it. 634 + */ 635 + static inline void 636 + xpc_part_deref(struct xpc_partition *part) 637 + { 638 + s32 refs = atomic_dec_return(&part->references); 639 + 640 + 641 + DBUG_ON(refs < 0); 642 + if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { 643 + wake_up(&part->teardown_wq); 644 + } 645 + } 646 + 647 + static inline int 648 + xpc_part_ref(struct xpc_partition *part) 649 + { 650 + int setup; 651 + 652 + 653 + atomic_inc(&part->references); 654 + setup = (part->setup_state == XPC_P_SETUP); 655 + if (!setup) { 656 + xpc_part_deref(part); 657 + } 658 + return setup; 659 + } 660 + 661 + 662 + 663 + /* 664 + * The following macro is to be used for the setting of the reason and 665 + * reason_line fields in both the struct xpc_channel and struct xpc_partition 666 + * structures. 667 + */ 668 + #define XPC_SET_REASON(_p, _reason, _line) \ 669 + { \ 670 + (_p)->reason = _reason; \ 671 + (_p)->reason_line = _line; \ 672 + } 673 + 674 + 675 + 676 + /* 677 + * The following set of macros and inlines are used for the sending and 678 + * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, 679 + * one that is associated with partition activity (SGI_XPC_ACTIVATE) and 680 + * the other that is associated with channel activity (SGI_XPC_NOTIFY). 
681 + */ 682 + 683 + static inline u64 684 + xpc_IPI_receive(AMO_t *amo) 685 + { 686 + return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); 687 + } 688 + 689 + 690 + static inline enum xpc_retval 691 + xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) 692 + { 693 + int ret = 0; 694 + unsigned long irq_flags; 695 + 696 + 697 + local_irq_save(irq_flags); 698 + 699 + FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); 700 + sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); 701 + 702 + /* 703 + * We must always use the nofault function regardless of whether we 704 + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 705 + * didn't, we'd never know that the other partition is down and would 706 + * keep sending IPIs and AMOs to it until the heartbeat times out. 707 + */ 708 + ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 709 + xp_nofault_PIOR_target)); 710 + 711 + local_irq_restore(irq_flags); 712 + 713 + return ((ret == 0) ? xpcSuccess : xpcPioReadError); 714 + } 715 + 716 + 717 + /* 718 + * IPIs associated with SGI_XPC_ACTIVATE IRQ. 719 + */ 720 + 721 + /* 722 + * Flag the appropriate AMO variable and send an IPI to the specified node. 
723 + */ 724 + static inline void 725 + xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid, 726 + int to_phys_cpuid) 727 + { 728 + int w_index = XPC_NASID_W_INDEX(from_nasid); 729 + int b_index = XPC_NASID_B_INDEX(from_nasid); 730 + AMO_t *amos = (AMO_t *) __va(amos_page + 731 + (XP_MAX_PARTITIONS * sizeof(AMO_t))); 732 + 733 + 734 + (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, 735 + to_phys_cpuid, SGI_XPC_ACTIVATE); 736 + } 737 + 738 + static inline void 739 + xpc_IPI_send_activate(struct xpc_vars *vars) 740 + { 741 + xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), 742 + vars->act_nasid, vars->act_phys_cpuid); 743 + } 744 + 745 + static inline void 746 + xpc_IPI_send_activated(struct xpc_partition *part) 747 + { 748 + xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 749 + part->remote_act_nasid, part->remote_act_phys_cpuid); 750 + } 751 + 752 + static inline void 753 + xpc_IPI_send_reactivate(struct xpc_partition *part) 754 + { 755 + xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, 756 + xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); 757 + } 758 + 759 + 760 + /* 761 + * IPIs associated with SGI_XPC_NOTIFY IRQ. 762 + */ 763 + 764 + /* 765 + * Send an IPI to the remote partition that is associated with the 766 + * specified channel. 
767 + */ 768 + #define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ 769 + xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) 770 + 771 + static inline void 772 + xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, 773 + unsigned long *irq_flags) 774 + { 775 + struct xpc_partition *part = &xpc_partitions[ch->partid]; 776 + enum xpc_retval ret; 777 + 778 + 779 + if (likely(part->act_state != XPC_P_DEACTIVATING)) { 780 + ret = xpc_IPI_send(part->remote_IPI_amo_va, 781 + (u64) ipi_flag << (ch->number * 8), 782 + part->remote_IPI_nasid, 783 + part->remote_IPI_phys_cpuid, 784 + SGI_XPC_NOTIFY); 785 + dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 786 + ipi_flag_string, ch->partid, ch->number, ret); 787 + if (unlikely(ret != xpcSuccess)) { 788 + if (irq_flags != NULL) { 789 + spin_unlock_irqrestore(&ch->lock, *irq_flags); 790 + } 791 + XPC_DEACTIVATE_PARTITION(part, ret); 792 + if (irq_flags != NULL) { 793 + spin_lock_irqsave(&ch->lock, *irq_flags); 794 + } 795 + } 796 + } 797 + } 798 + 799 + 800 + /* 801 + * Make it look like the remote partition, which is associated with the 802 + * specified channel, sent us an IPI. This faked IPI will be handled 803 + * by xpc_dropped_IPI_check(). 804 + */ 805 + #define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ 806 + xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) 807 + 808 + static inline void 809 + xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, 810 + char *ipi_flag_string) 811 + { 812 + struct xpc_partition *part = &xpc_partitions[ch->partid]; 813 + 814 + 815 + FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), 816 + FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); 817 + dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 818 + ipi_flag_string, ch->partid, ch->number); 819 + } 820 + 821 + 822 + /* 823 + * The sending and receiving of IPIs includes the setting of an AMO variable 824 + * to indicate the reason the IPI was sent. 
The 64-bit variable is divided 825 + * up into eight bytes, ordered from right to left. Byte zero pertains to 826 + * channel 0, byte one to channel 1, and so on. Each byte is described by 827 + * the following IPI flags. 828 + */ 829 + 830 + #define XPC_IPI_CLOSEREQUEST 0x01 831 + #define XPC_IPI_CLOSEREPLY 0x02 832 + #define XPC_IPI_OPENREQUEST 0x04 833 + #define XPC_IPI_OPENREPLY 0x08 834 + #define XPC_IPI_MSGREQUEST 0x10 835 + 836 + 837 + /* given an AMO variable and a channel#, get its associated IPI flags */ 838 + #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 839 + 840 + #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) 841 + #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) 842 + 843 + 844 + static inline void 845 + xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) 846 + { 847 + struct xpc_openclose_args *args = ch->local_openclose_args; 848 + 849 + 850 + args->reason = ch->reason; 851 + 852 + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); 853 + } 854 + 855 + static inline void 856 + xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) 857 + { 858 + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); 859 + } 860 + 861 + static inline void 862 + xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) 863 + { 864 + struct xpc_openclose_args *args = ch->local_openclose_args; 865 + 866 + 867 + args->msg_size = ch->msg_size; 868 + args->local_nentries = ch->local_nentries; 869 + 870 + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); 871 + } 872 + 873 + static inline void 874 + xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) 875 + { 876 + struct xpc_openclose_args *args = ch->local_openclose_args; 877 + 878 + 879 + args->remote_nentries = ch->remote_nentries; 880 + args->local_nentries = ch->local_nentries; 881 + args->local_msgqueue_pa = __pa(ch->local_msgqueue); 882 + 883 + 
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); 884 + } 885 + 886 + static inline void 887 + xpc_IPI_send_msgrequest(struct xpc_channel *ch) 888 + { 889 + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); 890 + } 891 + 892 + static inline void 893 + xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) 894 + { 895 + XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); 896 + } 897 + 898 + 899 + /* 900 + * Memory for XPC's AMO variables is allocated by the MSPEC driver. These 901 + * pages are located in the lowest granule. The lowest granule uses 4k pages 902 + * for cached references and an alternate TLB handler to never provide a 903 + * cacheable mapping for the entire region. This will prevent speculative 904 + * reading of cached copies of our lines from being issued which will cause 905 + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 906 + * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c) 907 + * and an additional 16 AMO variables for partition activation (xpc_hb.c). 
908 + */ 909 + static inline AMO_t * 910 + xpc_IPI_init(partid_t partid) 911 + { 912 + AMO_t *part_amo = xpc_vars->amos_page + partid; 913 + 914 + 915 + xpc_IPI_receive(part_amo); 916 + return part_amo; 917 + } 918 + 919 + 920 + 921 + static inline enum xpc_retval 922 + xpc_map_bte_errors(bte_result_t error) 923 + { 924 + switch (error) { 925 + case BTE_SUCCESS: return xpcSuccess; 926 + case BTEFAIL_DIR: return xpcBteDirectoryError; 927 + case BTEFAIL_POISON: return xpcBtePoisonError; 928 + case BTEFAIL_WERR: return xpcBteWriteError; 929 + case BTEFAIL_ACCESS: return xpcBteAccessError; 930 + case BTEFAIL_PWERR: return xpcBtePWriteError; 931 + case BTEFAIL_PRERR: return xpcBtePReadError; 932 + case BTEFAIL_TOUT: return xpcBteTimeOutError; 933 + case BTEFAIL_XTERR: return xpcBteXtalkError; 934 + case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; 935 + default: return xpcBteUnmappedError; 936 + } 937 + } 938 + 939 + 940 + 941 + static inline void * 942 + xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base) 943 + { 944 + /* see if kmalloc will give us cachline aligned memory by default */ 945 + *base = kmalloc(size, flags); 946 + if (*base == NULL) { 947 + return NULL; 948 + } 949 + if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 950 + return *base; 951 + } 952 + kfree(*base); 953 + 954 + /* nope, we'll have to do it ourselves */ 955 + *base = kmalloc(size + L1_CACHE_BYTES, flags); 956 + if (*base == NULL) { 957 + return NULL; 958 + } 959 + return (void *) L1_CACHE_ALIGN((u64) *base); 960 + } 961 + 962 + 963 + /* 964 + * Check to see if there is any channel activity to/from the specified 965 + * partition. 
966 + */ 967 + static inline void 968 + xpc_check_for_channel_activity(struct xpc_partition *part) 969 + { 970 + u64 IPI_amo; 971 + unsigned long irq_flags; 972 + 973 + 974 + IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); 975 + if (IPI_amo == 0) { 976 + return; 977 + } 978 + 979 + spin_lock_irqsave(&part->IPI_lock, irq_flags); 980 + part->local_IPI_amo |= IPI_amo; 981 + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); 982 + 983 + dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", 984 + XPC_PARTID(part), IPI_amo); 985 + 986 + xpc_wakeup_channel_mgr(part); 987 + } 988 + 989 + 990 + #endif /* _IA64_SN_KERNEL_XPC_H */ 991 +
+2297
arch/ia64/sn/kernel/xpc_channel.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 7 + */ 8 + 9 + 10 + /* 11 + * Cross Partition Communication (XPC) channel support. 12 + * 13 + * This is the part of XPC that manages the channels and 14 + * sends/receives messages across them to/from other partitions. 15 + * 16 + */ 17 + 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/init.h> 21 + #include <linux/sched.h> 22 + #include <linux/cache.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/slab.h> 25 + #include <asm/sn/bte.h> 26 + #include <asm/sn/sn_sal.h> 27 + #include "xpc.h" 28 + 29 + 30 + /* 31 + * Set up the initial values for the XPartition Communication channels. 32 + */ 33 + static void 34 + xpc_initialize_channels(struct xpc_partition *part, partid_t partid) 35 + { 36 + int ch_number; 37 + struct xpc_channel *ch; 38 + 39 + 40 + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 41 + ch = &part->channels[ch_number]; 42 + 43 + ch->partid = partid; 44 + ch->number = ch_number; 45 + ch->flags = XPC_C_DISCONNECTED; 46 + 47 + ch->local_GP = &part->local_GPs[ch_number]; 48 + ch->local_openclose_args = 49 + &part->local_openclose_args[ch_number]; 50 + 51 + atomic_set(&ch->kthreads_assigned, 0); 52 + atomic_set(&ch->kthreads_idle, 0); 53 + atomic_set(&ch->kthreads_active, 0); 54 + 55 + atomic_set(&ch->references, 0); 56 + atomic_set(&ch->n_to_notify, 0); 57 + 58 + spin_lock_init(&ch->lock); 59 + sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ 60 + 61 + atomic_set(&ch->n_on_msg_allocate_wq, 0); 62 + init_waitqueue_head(&ch->msg_allocate_wq); 63 + init_waitqueue_head(&ch->idle_wq); 64 + } 65 + } 66 + 67 + 68 + /* 69 + * Setup the infrastructure necessary to support XPartition Communication 70 + * between the specified remote partition and the local one. 
71 + */ 72 + enum xpc_retval 73 + xpc_setup_infrastructure(struct xpc_partition *part) 74 + { 75 + int ret; 76 + struct timer_list *timer; 77 + partid_t partid = XPC_PARTID(part); 78 + 79 + 80 + /* 81 + * Zero out MOST of the entry for this partition. Only the fields 82 + * starting with `nchannels' will be zeroed. The preceding fields must 83 + * remain `viable' across partition ups and downs, since they may be 84 + * referenced during this memset() operation. 85 + */ 86 + memset(&part->nchannels, 0, sizeof(struct xpc_partition) - 87 + offsetof(struct xpc_partition, nchannels)); 88 + 89 + /* 90 + * Allocate all of the channel structures as a contiguous chunk of 91 + * memory. 92 + */ 93 + part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, 94 + GFP_KERNEL); 95 + if (part->channels == NULL) { 96 + dev_err(xpc_chan, "can't get memory for channels\n"); 97 + return xpcNoMemory; 98 + } 99 + memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS); 100 + 101 + part->nchannels = XPC_NCHANNELS; 102 + 103 + 104 + /* allocate all the required GET/PUT values */ 105 + 106 + part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, 107 + GFP_KERNEL, &part->local_GPs_base); 108 + if (part->local_GPs == NULL) { 109 + kfree(part->channels); 110 + part->channels = NULL; 111 + dev_err(xpc_chan, "can't get memory for local get/put " 112 + "values\n"); 113 + return xpcNoMemory; 114 + } 115 + memset(part->local_GPs, 0, XPC_GP_SIZE); 116 + 117 + part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, 118 + GFP_KERNEL, &part->remote_GPs_base); 119 + if (part->remote_GPs == NULL) { 120 + kfree(part->channels); 121 + part->channels = NULL; 122 + kfree(part->local_GPs_base); 123 + part->local_GPs = NULL; 124 + dev_err(xpc_chan, "can't get memory for remote get/put " 125 + "values\n"); 126 + return xpcNoMemory; 127 + } 128 + memset(part->remote_GPs, 0, XPC_GP_SIZE); 129 + 130 + 131 + /* allocate all the required open and close args */ 132 + 133 + 
part->local_openclose_args = xpc_kmalloc_cacheline_aligned( 134 + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 135 + &part->local_openclose_args_base); 136 + if (part->local_openclose_args == NULL) { 137 + kfree(part->channels); 138 + part->channels = NULL; 139 + kfree(part->local_GPs_base); 140 + part->local_GPs = NULL; 141 + kfree(part->remote_GPs_base); 142 + part->remote_GPs = NULL; 143 + dev_err(xpc_chan, "can't get memory for local connect args\n"); 144 + return xpcNoMemory; 145 + } 146 + memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); 147 + 148 + part->remote_openclose_args = xpc_kmalloc_cacheline_aligned( 149 + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 150 + &part->remote_openclose_args_base); 151 + if (part->remote_openclose_args == NULL) { 152 + kfree(part->channels); 153 + part->channels = NULL; 154 + kfree(part->local_GPs_base); 155 + part->local_GPs = NULL; 156 + kfree(part->remote_GPs_base); 157 + part->remote_GPs = NULL; 158 + kfree(part->local_openclose_args_base); 159 + part->local_openclose_args = NULL; 160 + dev_err(xpc_chan, "can't get memory for remote connect args\n"); 161 + return xpcNoMemory; 162 + } 163 + memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); 164 + 165 + 166 + xpc_initialize_channels(part, partid); 167 + 168 + atomic_set(&part->nchannels_active, 0); 169 + 170 + 171 + /* local_IPI_amo were set to 0 by an earlier memset() */ 172 + 173 + /* Initialize this partitions AMO_t structure */ 174 + part->local_IPI_amo_va = xpc_IPI_init(partid); 175 + 176 + spin_lock_init(&part->IPI_lock); 177 + 178 + atomic_set(&part->channel_mgr_requests, 1); 179 + init_waitqueue_head(&part->channel_mgr_wq); 180 + 181 + sprintf(part->IPI_owner, "xpc%02d", partid); 182 + ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ, 183 + part->IPI_owner, (void *) (u64) partid); 184 + if (ret != 0) { 185 + kfree(part->channels); 186 + part->channels = NULL; 187 + kfree(part->local_GPs_base); 188 + part->local_GPs = NULL; 189 + 
kfree(part->remote_GPs_base); 190 + part->remote_GPs = NULL; 191 + kfree(part->local_openclose_args_base); 192 + part->local_openclose_args = NULL; 193 + kfree(part->remote_openclose_args_base); 194 + part->remote_openclose_args = NULL; 195 + dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " 196 + "errno=%d\n", -ret); 197 + return xpcLackOfResources; 198 + } 199 + 200 + /* Setup a timer to check for dropped IPIs */ 201 + timer = &part->dropped_IPI_timer; 202 + init_timer(timer); 203 + timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; 204 + timer->data = (unsigned long) part; 205 + timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; 206 + add_timer(timer); 207 + 208 + /* 209 + * With the setting of the partition setup_state to XPC_P_SETUP, we're 210 + * declaring that this partition is ready to go. 211 + */ 212 + (volatile u8) part->setup_state = XPC_P_SETUP; 213 + 214 + 215 + /* 216 + * Setup the per partition specific variables required by the 217 + * remote partition to establish channel connections with us. 218 + * 219 + * The setting of the magic # indicates that these per partition 220 + * specific variables are ready to be used. 221 + */ 222 + xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); 223 + xpc_vars_part[partid].openclose_args_pa = 224 + __pa(part->local_openclose_args); 225 + xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); 226 + xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id()); 227 + xpc_vars_part[partid].IPI_phys_cpuid = 228 + cpu_physical_id(smp_processor_id()); 229 + xpc_vars_part[partid].nchannels = part->nchannels; 230 + (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1; 231 + 232 + return xpcSuccess; 233 + } 234 + 235 + 236 + /* 237 + * Create a wrapper that hides the underlying mechanism for pulling a cacheline 238 + * (or multiple cachelines) from a remote partition. 239 + * 240 + * src must be a cacheline aligned physical address on the remote partition. 
241 + * dst must be a cacheline aligned virtual address on this partition. 242 + * cnt must be an cacheline sized 243 + */ 244 + static enum xpc_retval 245 + xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, 246 + const void *src, size_t cnt) 247 + { 248 + bte_result_t bte_ret; 249 + 250 + 251 + DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); 252 + DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst)); 253 + DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); 254 + 255 + if (part->act_state == XPC_P_DEACTIVATING) { 256 + return part->reason; 257 + } 258 + 259 + bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), 260 + (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); 261 + if (bte_ret == BTE_SUCCESS) { 262 + return xpcSuccess; 263 + } 264 + 265 + dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", 266 + XPC_PARTID(part), bte_ret); 267 + 268 + return xpc_map_bte_errors(bte_ret); 269 + } 270 + 271 + 272 + /* 273 + * Pull the remote per partititon specific variables from the specified 274 + * partition. 
275 + */ 276 + enum xpc_retval 277 + xpc_pull_remote_vars_part(struct xpc_partition *part) 278 + { 279 + u8 buffer[L1_CACHE_BYTES * 2]; 280 + struct xpc_vars_part *pulled_entry_cacheline = 281 + (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); 282 + struct xpc_vars_part *pulled_entry; 283 + u64 remote_entry_cacheline_pa, remote_entry_pa; 284 + partid_t partid = XPC_PARTID(part); 285 + enum xpc_retval ret; 286 + 287 + 288 + /* pull the cacheline that contains the variables we're interested in */ 289 + 290 + DBUG_ON(part->remote_vars_part_pa != 291 + L1_CACHE_ALIGN(part->remote_vars_part_pa)); 292 + DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); 293 + 294 + remote_entry_pa = part->remote_vars_part_pa + 295 + sn_partition_id * sizeof(struct xpc_vars_part); 296 + 297 + remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); 298 + 299 + pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + 300 + (remote_entry_pa & (L1_CACHE_BYTES - 1))); 301 + 302 + ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, 303 + (void *) remote_entry_cacheline_pa, 304 + L1_CACHE_BYTES); 305 + if (ret != xpcSuccess) { 306 + dev_dbg(xpc_chan, "failed to pull XPC vars_part from " 307 + "partition %d, ret=%d\n", partid, ret); 308 + return ret; 309 + } 310 + 311 + 312 + /* see if they've been set up yet */ 313 + 314 + if (pulled_entry->magic != XPC_VP_MAGIC1 && 315 + pulled_entry->magic != XPC_VP_MAGIC2) { 316 + 317 + if (pulled_entry->magic != 0) { 318 + dev_dbg(xpc_chan, "partition %d's XPC vars_part for " 319 + "partition %d has bad magic value (=0x%lx)\n", 320 + partid, sn_partition_id, pulled_entry->magic); 321 + return xpcBadMagic; 322 + } 323 + 324 + /* they've not been initialized yet */ 325 + return xpcRetry; 326 + } 327 + 328 + if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { 329 + 330 + /* validate the variables */ 331 + 332 + if (pulled_entry->GPs_pa == 0 || 333 + pulled_entry->openclose_args_pa == 0 || 334 + 
pulled_entry->IPI_amo_pa == 0) { 335 + 336 + dev_err(xpc_chan, "partition %d's XPC vars_part for " 337 + "partition %d are not valid\n", partid, 338 + sn_partition_id); 339 + return xpcInvalidAddress; 340 + } 341 + 342 + /* the variables we imported look to be valid */ 343 + 344 + part->remote_GPs_pa = pulled_entry->GPs_pa; 345 + part->remote_openclose_args_pa = 346 + pulled_entry->openclose_args_pa; 347 + part->remote_IPI_amo_va = 348 + (AMO_t *) __va(pulled_entry->IPI_amo_pa); 349 + part->remote_IPI_nasid = pulled_entry->IPI_nasid; 350 + part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; 351 + 352 + if (part->nchannels > pulled_entry->nchannels) { 353 + part->nchannels = pulled_entry->nchannels; 354 + } 355 + 356 + /* let the other side know that we've pulled their variables */ 357 + 358 + (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2; 359 + } 360 + 361 + if (pulled_entry->magic == XPC_VP_MAGIC1) { 362 + return xpcRetry; 363 + } 364 + 365 + return xpcSuccess; 366 + } 367 + 368 + 369 + /* 370 + * Get the IPI flags and pull the openclose args and/or remote GPs as needed. 371 + */ 372 + static u64 373 + xpc_get_IPI_flags(struct xpc_partition *part) 374 + { 375 + unsigned long irq_flags; 376 + u64 IPI_amo; 377 + enum xpc_retval ret; 378 + 379 + 380 + /* 381 + * See if there are any IPI flags to be handled. 
382 + */ 383 + 384 + spin_lock_irqsave(&part->IPI_lock, irq_flags); 385 + if ((IPI_amo = part->local_IPI_amo) != 0) { 386 + part->local_IPI_amo = 0; 387 + } 388 + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); 389 + 390 + 391 + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { 392 + ret = xpc_pull_remote_cachelines(part, 393 + part->remote_openclose_args, 394 + (void *) part->remote_openclose_args_pa, 395 + XPC_OPENCLOSE_ARGS_SIZE); 396 + if (ret != xpcSuccess) { 397 + XPC_DEACTIVATE_PARTITION(part, ret); 398 + 399 + dev_dbg(xpc_chan, "failed to pull openclose args from " 400 + "partition %d, ret=%d\n", XPC_PARTID(part), 401 + ret); 402 + 403 + /* don't bother processing IPIs anymore */ 404 + IPI_amo = 0; 405 + } 406 + } 407 + 408 + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { 409 + ret = xpc_pull_remote_cachelines(part, part->remote_GPs, 410 + (void *) part->remote_GPs_pa, 411 + XPC_GP_SIZE); 412 + if (ret != xpcSuccess) { 413 + XPC_DEACTIVATE_PARTITION(part, ret); 414 + 415 + dev_dbg(xpc_chan, "failed to pull GPs from partition " 416 + "%d, ret=%d\n", XPC_PARTID(part), ret); 417 + 418 + /* don't bother processing IPIs anymore */ 419 + IPI_amo = 0; 420 + } 421 + } 422 + 423 + return IPI_amo; 424 + } 425 + 426 + 427 + /* 428 + * Allocate the local message queue and the notify queue. 429 + */ 430 + static enum xpc_retval 431 + xpc_allocate_local_msgqueue(struct xpc_channel *ch) 432 + { 433 + unsigned long irq_flags; 434 + int nentries; 435 + size_t nbytes; 436 + 437 + 438 + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between 439 + // >>> iterations of the for-loop, bail if set? 440 + 441 + // >>> should we impose a minumum #of entries? like 4 or 8? 
442 + for (nentries = ch->local_nentries; nentries > 0; nentries--) { 443 + 444 + nbytes = nentries * ch->msg_size; 445 + ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, 446 + (GFP_KERNEL | GFP_DMA), 447 + &ch->local_msgqueue_base); 448 + if (ch->local_msgqueue == NULL) { 449 + continue; 450 + } 451 + memset(ch->local_msgqueue, 0, nbytes); 452 + 453 + nbytes = nentries * sizeof(struct xpc_notify); 454 + ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA)); 455 + if (ch->notify_queue == NULL) { 456 + kfree(ch->local_msgqueue_base); 457 + ch->local_msgqueue = NULL; 458 + continue; 459 + } 460 + memset(ch->notify_queue, 0, nbytes); 461 + 462 + spin_lock_irqsave(&ch->lock, irq_flags); 463 + if (nentries < ch->local_nentries) { 464 + dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " 465 + "partid=%d, channel=%d\n", nentries, 466 + ch->local_nentries, ch->partid, ch->number); 467 + 468 + ch->local_nentries = nentries; 469 + } 470 + spin_unlock_irqrestore(&ch->lock, irq_flags); 471 + return xpcSuccess; 472 + } 473 + 474 + dev_dbg(xpc_chan, "can't get memory for local message queue and notify " 475 + "queue, partid=%d, channel=%d\n", ch->partid, ch->number); 476 + return xpcNoMemory; 477 + } 478 + 479 + 480 + /* 481 + * Allocate the cached remote message queue. 482 + */ 483 + static enum xpc_retval 484 + xpc_allocate_remote_msgqueue(struct xpc_channel *ch) 485 + { 486 + unsigned long irq_flags; 487 + int nentries; 488 + size_t nbytes; 489 + 490 + 491 + DBUG_ON(ch->remote_nentries <= 0); 492 + 493 + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between 494 + // >>> iterations of the for-loop, bail if set? 495 + 496 + // >>> should we impose a minumum #of entries? like 4 or 8? 
	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
						(GFP_KERNEL | GFP_DMA),
						&ch->remote_msgqueue_base);
		if (ch->remote_msgqueue == NULL) {
			/* try again with one fewer entry */
			continue;
		}
		memset(ch->remote_msgqueue, 0, nbytes);

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->remote_nentries, ch->partid, ch->number);

			ch->remote_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpcSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
		"partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpcNoMemory;
}


/*
 * Allocate message queues and other stuff associated with a channel.
 *
 * Note: Assumes all of the channel sizes are filled in.
 *
 * Returns xpcSuccess with XPC_C_SETUP set in ch->flags, or the error from
 * either of the two queue allocators (in which case nothing is left
 * allocated).
 */
static enum xpc_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int i;
	enum xpc_retval ret;


	DBUG_ON(ch->flags & XPC_C_SETUP);

	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
		return ret;
	}

	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
		/* undo the local queue allocation so no memory is leaked */
		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
		return ret;
	}

	for (i = 0; i < ch->local_nentries; i++) {
		/* use a semaphore as an event wait queue */
		sema_init(&ch->notify_queue[i].sema, 0);
	}

	sema_init(&ch->teardown_sema, 0);	/* event wait */

	spin_lock_irqsave(&ch->lock, irq_flags);
	ch->flags |= XPC_C_SETUP;
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpcSuccess;
}


/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
573 + */ 574 + static void 575 + xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) 576 + { 577 + enum xpc_retval ret; 578 + 579 + 580 + DBUG_ON(!spin_is_locked(&ch->lock)); 581 + 582 + if (!(ch->flags & XPC_C_OPENREQUEST) || 583 + !(ch->flags & XPC_C_ROPENREQUEST)) { 584 + /* nothing more to do for now */ 585 + return; 586 + } 587 + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); 588 + 589 + if (!(ch->flags & XPC_C_SETUP)) { 590 + spin_unlock_irqrestore(&ch->lock, *irq_flags); 591 + ret = xpc_allocate_msgqueues(ch); 592 + spin_lock_irqsave(&ch->lock, *irq_flags); 593 + 594 + if (ret != xpcSuccess) { 595 + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); 596 + } 597 + if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { 598 + return; 599 + } 600 + 601 + DBUG_ON(!(ch->flags & XPC_C_SETUP)); 602 + DBUG_ON(ch->local_msgqueue == NULL); 603 + DBUG_ON(ch->remote_msgqueue == NULL); 604 + } 605 + 606 + if (!(ch->flags & XPC_C_OPENREPLY)) { 607 + ch->flags |= XPC_C_OPENREPLY; 608 + xpc_IPI_send_openreply(ch, irq_flags); 609 + } 610 + 611 + if (!(ch->flags & XPC_C_ROPENREPLY)) { 612 + return; 613 + } 614 + 615 + DBUG_ON(ch->remote_msgqueue_pa == 0); 616 + 617 + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ 618 + 619 + dev_info(xpc_chan, "channel %d to partition %d connected\n", 620 + ch->number, ch->partid); 621 + 622 + spin_unlock_irqrestore(&ch->lock, *irq_flags); 623 + xpc_create_kthreads(ch, 1); 624 + spin_lock_irqsave(&ch->lock, *irq_flags); 625 + } 626 + 627 + 628 + /* 629 + * Free up message queues and other stuff that were allocated for the specified 630 + * channel. 631 + * 632 + * Note: ch->reason and ch->reason_line are left set for debugging purposes, 633 + * they're cleared when XPC_C_DISCONNECTED is cleared. 
634 + */ 635 + static void 636 + xpc_free_msgqueues(struct xpc_channel *ch) 637 + { 638 + DBUG_ON(!spin_is_locked(&ch->lock)); 639 + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); 640 + 641 + ch->remote_msgqueue_pa = 0; 642 + ch->func = NULL; 643 + ch->key = NULL; 644 + ch->msg_size = 0; 645 + ch->local_nentries = 0; 646 + ch->remote_nentries = 0; 647 + ch->kthreads_assigned_limit = 0; 648 + ch->kthreads_idle_limit = 0; 649 + 650 + ch->local_GP->get = 0; 651 + ch->local_GP->put = 0; 652 + ch->remote_GP.get = 0; 653 + ch->remote_GP.put = 0; 654 + ch->w_local_GP.get = 0; 655 + ch->w_local_GP.put = 0; 656 + ch->w_remote_GP.get = 0; 657 + ch->w_remote_GP.put = 0; 658 + ch->next_msg_to_pull = 0; 659 + 660 + if (ch->flags & XPC_C_SETUP) { 661 + ch->flags &= ~XPC_C_SETUP; 662 + 663 + dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", 664 + ch->flags, ch->partid, ch->number); 665 + 666 + kfree(ch->local_msgqueue_base); 667 + ch->local_msgqueue = NULL; 668 + kfree(ch->remote_msgqueue_base); 669 + ch->remote_msgqueue = NULL; 670 + kfree(ch->notify_queue); 671 + ch->notify_queue = NULL; 672 + 673 + /* in case someone is waiting for the teardown to complete */ 674 + up(&ch->teardown_sema); 675 + } 676 + } 677 + 678 + 679 + /* 680 + * spin_lock_irqsave() is expected to be held on entry. 
681 + */ 682 + static void 683 + xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) 684 + { 685 + struct xpc_partition *part = &xpc_partitions[ch->partid]; 686 + u32 ch_flags = ch->flags; 687 + 688 + 689 + DBUG_ON(!spin_is_locked(&ch->lock)); 690 + 691 + if (!(ch->flags & XPC_C_DISCONNECTING)) { 692 + return; 693 + } 694 + 695 + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); 696 + 697 + /* make sure all activity has settled down first */ 698 + 699 + if (atomic_read(&ch->references) > 0) { 700 + return; 701 + } 702 + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); 703 + 704 + /* it's now safe to free the channel's message queues */ 705 + 706 + xpc_free_msgqueues(ch); 707 + DBUG_ON(ch->flags & XPC_C_SETUP); 708 + 709 + if (part->act_state != XPC_P_DEACTIVATING) { 710 + 711 + /* as long as the other side is up do the full protocol */ 712 + 713 + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 714 + return; 715 + } 716 + 717 + if (!(ch->flags & XPC_C_CLOSEREPLY)) { 718 + ch->flags |= XPC_C_CLOSEREPLY; 719 + xpc_IPI_send_closereply(ch, irq_flags); 720 + } 721 + 722 + if (!(ch->flags & XPC_C_RCLOSEREPLY)) { 723 + return; 724 + } 725 + } 726 + 727 + /* both sides are disconnected now */ 728 + 729 + ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */ 730 + 731 + atomic_dec(&part->nchannels_active); 732 + 733 + if (ch_flags & XPC_C_WASCONNECTED) { 734 + dev_info(xpc_chan, "channel %d to partition %d disconnected, " 735 + "reason=%d\n", ch->number, ch->partid, ch->reason); 736 + } 737 + } 738 + 739 + 740 + /* 741 + * Process a change in the channel's remote connection state. 
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
				u8 IPI_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
				&part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xpc_retval reason;



	spin_lock_irqsave(&ch->lock, irq_flags);


	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the IPI_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
			IPI_flags &= ~XPC_IPI_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			/*
			 * The channel was fully disconnected; this close
			 * request must belong to a NEW connection attempt
			 * from the other side (an OPENREQUEST should be
			 * packed with it), unless the partition is going
			 * down.
			 */
			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
				DBUG_ON(part->act_state !=
							XPC_P_DEACTIVATING);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			/* sanitize the remote's reason before adopting it */
			reason = args->reason;
			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
				reason = xpcUnknownReason;
			} else if (reason == xpcUnregistering) {
				reason = xpcOtherUnregistering;
			}

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
		} else {
			xpc_process_disconnect(ch, &irq_flags);
		}
	}


	if (IPI_flags & XPC_IPI_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
			" channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}


	if (IPI_flags & XPC_IPI_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if ((ch->flags & XPC_C_DISCONNECTING) ||
					part->act_state == XPC_P_DEACTIVATING) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
							XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
					XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		DBUG_ON(args->msg_size == 0);
		DBUG_ON(args->local_nentries == 0);

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;


		if (ch->flags & XPC_C_OPENREQUEST) {
			/* both sides must agree on the message size */
			if (args->msg_size != ch->msg_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}


	if (IPI_flags & XPC_IPI_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
			"local_nentries=%d, remote_nentries=%d) received from "
			"partid=%d, channel=%d\n", args->local_msgqueue_pa,
			args->local_nentries, args->remote_nentries,
			ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *			    partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		/* each side uses the smaller of the two queue sizes */
		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}


/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xpc_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];


	if (down_interruptible(&registration->sema) != 0) {
		return xpcInterrupted;
	}

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		up(&registration->sema);
		return xpcUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		up(&registration->sema);
		return ch->reason;
	}


	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			up(&registration->sema);
			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpcUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	up(&registration->sema);


	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_IPI_send_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpcSuccess;
}


/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->w_remote_GP.get - 1;


	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
				cmpxchg(&notify->type, notify_type, 0) !=
								notify_type) {
			/* the cmpxchg() claims the entry atomically; if we
			 * lost the race someone else will do the callout */
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *) notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
								notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *) notify, get,
				ch->partid, ch->number);
		}
	}
}


/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
{
	struct xpc_msg *msg;
	s64 get;


	get = ch->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
				(get % ch->local_nentries) * ch->msg_size);
		msg->flags = 0;
	} while (++get < (volatile s64) ch->remote_GP.get);
}


/*
 * Clear some of the msg flags in the remote message queue.
 */
1112 + */ 1113 + static inline void 1114 + xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) 1115 + { 1116 + struct xpc_msg *msg; 1117 + s64 put; 1118 + 1119 + 1120 + put = ch->w_remote_GP.put; 1121 + do { 1122 + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 1123 + (put % ch->remote_nentries) * ch->msg_size); 1124 + msg->flags = 0; 1125 + } while (++put < (volatile s64) ch->remote_GP.put); 1126 + } 1127 + 1128 + 1129 + static void 1130 + xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) 1131 + { 1132 + struct xpc_channel *ch = &part->channels[ch_number]; 1133 + int nmsgs_sent; 1134 + 1135 + 1136 + ch->remote_GP = part->remote_GPs[ch_number]; 1137 + 1138 + 1139 + /* See what, if anything, has changed for each connected channel */ 1140 + 1141 + xpc_msgqueue_ref(ch); 1142 + 1143 + if (ch->w_remote_GP.get == ch->remote_GP.get && 1144 + ch->w_remote_GP.put == ch->remote_GP.put) { 1145 + /* nothing changed since GPs were last pulled */ 1146 + xpc_msgqueue_deref(ch); 1147 + return; 1148 + } 1149 + 1150 + if (!(ch->flags & XPC_C_CONNECTED)){ 1151 + xpc_msgqueue_deref(ch); 1152 + return; 1153 + } 1154 + 1155 + 1156 + /* 1157 + * First check to see if messages recently sent by us have been 1158 + * received by the other side. (The remote GET value will have 1159 + * changed since we last looked at it.) 1160 + */ 1161 + 1162 + if (ch->w_remote_GP.get != ch->remote_GP.get) { 1163 + 1164 + /* 1165 + * We need to notify any senders that want to be notified 1166 + * that their sent messages have been received by their 1167 + * intended recipients. We need to do this before updating 1168 + * w_remote_GP.get so that we don't allocate the same message 1169 + * queue entries prematurely (see xpc_allocate_msg()). 1170 + */ 1171 + if (atomic_read(&ch->n_to_notify) > 0) { 1172 + /* 1173 + * Notify senders that messages sent have been 1174 + * received and delivered by the other side. 
1175 + */ 1176 + xpc_notify_senders(ch, xpcMsgDelivered, 1177 + ch->remote_GP.get); 1178 + } 1179 + 1180 + /* 1181 + * Clear msg->flags in previously sent messages, so that 1182 + * they're ready for xpc_allocate_msg(). 1183 + */ 1184 + xpc_clear_local_msgqueue_flags(ch); 1185 + 1186 + (volatile s64) ch->w_remote_GP.get = ch->remote_GP.get; 1187 + 1188 + dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " 1189 + "channel=%d\n", ch->w_remote_GP.get, ch->partid, 1190 + ch->number); 1191 + 1192 + /* 1193 + * If anyone was waiting for message queue entries to become 1194 + * available, wake them up. 1195 + */ 1196 + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { 1197 + wake_up(&ch->msg_allocate_wq); 1198 + } 1199 + } 1200 + 1201 + 1202 + /* 1203 + * Now check for newly sent messages by the other side. (The remote 1204 + * PUT value will have changed since we last looked at it.) 1205 + */ 1206 + 1207 + if (ch->w_remote_GP.put != ch->remote_GP.put) { 1208 + /* 1209 + * Clear msg->flags in previously received messages, so that 1210 + * they're ready for xpc_get_deliverable_msg(). 
1211 + */ 1212 + xpc_clear_remote_msgqueue_flags(ch); 1213 + 1214 + (volatile s64) ch->w_remote_GP.put = ch->remote_GP.put; 1215 + 1216 + dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " 1217 + "channel=%d\n", ch->w_remote_GP.put, ch->partid, 1218 + ch->number); 1219 + 1220 + nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get; 1221 + if (nmsgs_sent > 0) { 1222 + dev_dbg(xpc_chan, "msgs waiting to be copied and " 1223 + "delivered=%d, partid=%d, channel=%d\n", 1224 + nmsgs_sent, ch->partid, ch->number); 1225 + 1226 + if (ch->flags & XPC_C_CONNECTCALLOUT) { 1227 + xpc_activate_kthreads(ch, nmsgs_sent); 1228 + } 1229 + } 1230 + } 1231 + 1232 + xpc_msgqueue_deref(ch); 1233 + } 1234 + 1235 + 1236 + void 1237 + xpc_process_channel_activity(struct xpc_partition *part) 1238 + { 1239 + unsigned long irq_flags; 1240 + u64 IPI_amo, IPI_flags; 1241 + struct xpc_channel *ch; 1242 + int ch_number; 1243 + 1244 + 1245 + IPI_amo = xpc_get_IPI_flags(part); 1246 + 1247 + /* 1248 + * Initiate channel connections for registered channels. 1249 + * 1250 + * For each connected channel that has pending messages activate idle 1251 + * kthreads and/or create new kthreads as needed. 1252 + */ 1253 + 1254 + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 1255 + ch = &part->channels[ch_number]; 1256 + 1257 + 1258 + /* 1259 + * Process any open or close related IPI flags, and then deal 1260 + * with connecting or disconnecting the channel as required. 
1261 + */ 1262 + 1263 + IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); 1264 + 1265 + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { 1266 + xpc_process_openclose_IPI(part, ch_number, IPI_flags); 1267 + } 1268 + 1269 + 1270 + if (ch->flags & XPC_C_DISCONNECTING) { 1271 + spin_lock_irqsave(&ch->lock, irq_flags); 1272 + xpc_process_disconnect(ch, &irq_flags); 1273 + spin_unlock_irqrestore(&ch->lock, irq_flags); 1274 + continue; 1275 + } 1276 + 1277 + if (part->act_state == XPC_P_DEACTIVATING) { 1278 + continue; 1279 + } 1280 + 1281 + if (!(ch->flags & XPC_C_CONNECTED)) { 1282 + if (!(ch->flags & XPC_C_OPENREQUEST)) { 1283 + DBUG_ON(ch->flags & XPC_C_SETUP); 1284 + (void) xpc_connect_channel(ch); 1285 + } else { 1286 + spin_lock_irqsave(&ch->lock, irq_flags); 1287 + xpc_process_connect(ch, &irq_flags); 1288 + spin_unlock_irqrestore(&ch->lock, irq_flags); 1289 + } 1290 + continue; 1291 + } 1292 + 1293 + 1294 + /* 1295 + * Process any message related IPI flags, this may involve the 1296 + * activation of kthreads to deliver any pending messages sent 1297 + * from the other partition. 1298 + */ 1299 + 1300 + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { 1301 + xpc_process_msg_IPI(part, ch_number); 1302 + } 1303 + } 1304 + } 1305 + 1306 + 1307 + /* 1308 + * XPC's heartbeat code calls this function to inform XPC that a partition has 1309 + * gone down. XPC responds by tearing down the XPartition Communication 1310 + * infrastructure used for the just downed partition. 1311 + * 1312 + * XPC's heartbeat code will never call this function and xpc_partition_up() 1313 + * at the same time. Nor will it ever make multiple calls to either function 1314 + * at the same time. 
void
xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;


	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}


	/* disconnect all channels associated with the downed partition */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];


		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	/* let the channel manager thread finish the disconnects */
	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}


/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
	partid_t partid = XPC_PARTID(part);


	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IPIs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SETUP);
	part->setup_state = XPC_P_WTEARDOWN;

	xpc_vars_part[partid].magic = 0;


	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);


	/*
	 * Before proceeding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));


	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part->dropped_IPI_timer);

	/* free the bases; the aligned pointers are into the bases */
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part->remote_GPs_base);
	part->remote_GPs = NULL;
	kfree(part->local_GPs_base);
	part->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part->local_IPI_amo_va = NULL;
}


/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
1414 + */ 1415 + void 1416 + xpc_initiate_connect(int ch_number) 1417 + { 1418 + partid_t partid; 1419 + struct xpc_partition *part; 1420 + struct xpc_channel *ch; 1421 + 1422 + 1423 + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1424 + 1425 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1426 + part = &xpc_partitions[partid]; 1427 + 1428 + if (xpc_part_ref(part)) { 1429 + ch = &part->channels[ch_number]; 1430 + 1431 + if (!(ch->flags & XPC_C_DISCONNECTING)) { 1432 + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); 1433 + DBUG_ON(ch->flags & XPC_C_CONNECTED); 1434 + DBUG_ON(ch->flags & XPC_C_SETUP); 1435 + 1436 + /* 1437 + * Initiate the establishment of a connection 1438 + * on the newly registered channel to the 1439 + * remote partition. 1440 + */ 1441 + xpc_wakeup_channel_mgr(part); 1442 + } 1443 + 1444 + xpc_part_deref(part); 1445 + } 1446 + } 1447 + } 1448 + 1449 + 1450 + void 1451 + xpc_connected_callout(struct xpc_channel *ch) 1452 + { 1453 + unsigned long irq_flags; 1454 + 1455 + 1456 + /* let the registerer know that a connection has been established */ 1457 + 1458 + if (ch->func != NULL) { 1459 + dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " 1460 + "partid=%d, channel=%d\n", ch->partid, ch->number); 1461 + 1462 + ch->func(xpcConnected, ch->partid, ch->number, 1463 + (void *) (u64) ch->local_nentries, ch->key); 1464 + 1465 + dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " 1466 + "partid=%d, channel=%d\n", ch->partid, ch->number); 1467 + } 1468 + 1469 + spin_lock_irqsave(&ch->lock, irq_flags); 1470 + ch->flags |= XPC_C_CONNECTCALLOUT; 1471 + spin_unlock_irqrestore(&ch->lock, irq_flags); 1472 + } 1473 + 1474 + 1475 + /* 1476 + * Called by XP at the time of channel connection unregistration to cause 1477 + * XPC to teardown all current connections for the specified channel. 
1478 + * 1479 + * Before returning xpc_initiate_disconnect() will wait until all connections 1480 + * on the specified channel have been closed/torndown. So the caller can be 1481 + * assured that they will not be receiving any more callouts from XPC to the 1482 + * function they registered via xpc_connect(). 1483 + * 1484 + * Arguments: 1485 + * 1486 + * ch_number - channel # to unregister. 1487 + */ 1488 + void 1489 + xpc_initiate_disconnect(int ch_number) 1490 + { 1491 + unsigned long irq_flags; 1492 + partid_t partid; 1493 + struct xpc_partition *part; 1494 + struct xpc_channel *ch; 1495 + 1496 + 1497 + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1498 + 1499 + /* initiate the channel disconnect for every active partition */ 1500 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1501 + part = &xpc_partitions[partid]; 1502 + 1503 + if (xpc_part_ref(part)) { 1504 + ch = &part->channels[ch_number]; 1505 + xpc_msgqueue_ref(ch); 1506 + 1507 + spin_lock_irqsave(&ch->lock, irq_flags); 1508 + 1509 + XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, 1510 + &irq_flags); 1511 + 1512 + spin_unlock_irqrestore(&ch->lock, irq_flags); 1513 + 1514 + xpc_msgqueue_deref(ch); 1515 + xpc_part_deref(part); 1516 + } 1517 + } 1518 + 1519 + xpc_disconnect_wait(ch_number); 1520 + } 1521 + 1522 + 1523 + /* 1524 + * To disconnect a channel, and reflect it back to all who may be waiting. 1525 + * 1526 + * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by 1527 + * >>> xpc_free_msgqueues(). 1528 + * 1529 + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. 
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
			enum xpc_retval reason, unsigned long *irq_flags)
{
	u32 flags;


	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
		/* a disconnect is already in progress (or done) */
		return;
	}
	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	flags = ch->flags;	/* snapshot before the connect flags go away */
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
			XPC_C_CONNECTING | XPC_C_CONNECTED);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	xpc_IPI_send_closerequest(ch, irq_flags);

	if (flags & XPC_C_CONNECTED) {
		ch->flags |= XPC_C_WASCONNECTED;
	}

	if (atomic_read(&ch->kthreads_idle) > 0) {
		/* wake all idle kthreads so they can exit */
		wake_up_all(&ch->idle_wq);
	}

	/* the wakeups/callouts below can block; drop the channel lock */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);


	/* wake those waiting to allocate an entry from the local msg queue */

	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
		wake_up(&ch->msg_allocate_wq);
	}

	/* wake those waiting for notify completion */

	if (atomic_read(&ch->n_to_notify) > 0) {
		xpc_notify_senders(ch, reason, ch->w_local_GP.put);
	}

	spin_lock_irqsave(&ch->lock, *irq_flags);
}


/*
 * Make the "disconnected" callout to the channel's registerer.
 */
void
xpc_disconnected_callout(struct xpc_channel *ch)
{
	/*
	 * Let the channel's registerer know that the channel is now
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made, unless the disconnect was for
	 * abnormal reasons.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", ch->reason, ch->partid, ch->number);

		ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", ch->reason, ch->partid, ch->number);
	}
}


/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
static enum xpc_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xpc_retval ret;


	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	/* NOTE(review): interruptible_sleep_on_timeout() is a deprecated,
	 * racy API in later kernels — consider wait_event_* instead. */
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
	} else if (ret == 0) {
		/* the timeout expired without a wakeup */
		ret = xpcTimeout;
	} else {
		/* woken early, either by a signal or by a real wakeup */
		ret = xpcInterrupted;
	}

	return ret;
}


/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
1644 + */ 1645 + static enum xpc_retval 1646 + xpc_allocate_msg(struct xpc_channel *ch, u32 flags, 1647 + struct xpc_msg **address_of_msg) 1648 + { 1649 + struct xpc_msg *msg; 1650 + enum xpc_retval ret; 1651 + s64 put; 1652 + 1653 + 1654 + /* this reference will be dropped in xpc_send_msg() */ 1655 + xpc_msgqueue_ref(ch); 1656 + 1657 + if (ch->flags & XPC_C_DISCONNECTING) { 1658 + xpc_msgqueue_deref(ch); 1659 + return ch->reason; 1660 + } 1661 + if (!(ch->flags & XPC_C_CONNECTED)) { 1662 + xpc_msgqueue_deref(ch); 1663 + return xpcNotConnected; 1664 + } 1665 + 1666 + 1667 + /* 1668 + * Get the next available message entry from the local message queue. 1669 + * If none are available, we'll make sure that we grab the latest 1670 + * GP values. 1671 + */ 1672 + ret = xpcTimeout; 1673 + 1674 + while (1) { 1675 + 1676 + put = (volatile s64) ch->w_local_GP.put; 1677 + if (put - (volatile s64) ch->w_remote_GP.get < 1678 + ch->local_nentries) { 1679 + 1680 + /* There are available message entries. We need to try 1681 + * to secure one for ourselves. We'll do this by trying 1682 + * to increment w_local_GP.put as long as someone else 1683 + * doesn't beat us to it. If they do, we'll have to 1684 + * try again. 1685 + */ 1686 + if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == 1687 + put) { 1688 + /* we got the entry referenced by put */ 1689 + break; 1690 + } 1691 + continue; /* try again */ 1692 + } 1693 + 1694 + 1695 + /* 1696 + * There aren't any available msg entries at this time. 1697 + * 1698 + * In waiting for a message entry to become available, 1699 + * we set a timeout in case the other side is not 1700 + * sending completion IPIs. This lets us fake an IPI 1701 + * that will cause the IPI handler to fetch the latest 1702 + * GP values as if an IPI was sent by the other side. 
1703 + */ 1704 + if (ret == xpcTimeout) { 1705 + xpc_IPI_send_local_msgrequest(ch); 1706 + } 1707 + 1708 + if (flags & XPC_NOWAIT) { 1709 + xpc_msgqueue_deref(ch); 1710 + return xpcNoWait; 1711 + } 1712 + 1713 + ret = xpc_allocate_msg_wait(ch); 1714 + if (ret != xpcInterrupted && ret != xpcTimeout) { 1715 + xpc_msgqueue_deref(ch); 1716 + return ret; 1717 + } 1718 + } 1719 + 1720 + 1721 + /* get the message's address and initialize it */ 1722 + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1723 + (put % ch->local_nentries) * ch->msg_size); 1724 + 1725 + 1726 + DBUG_ON(msg->flags != 0); 1727 + msg->number = put; 1728 + 1729 + dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " 1730 + "msg_number=%ld, partid=%d, channel=%d\n", put + 1, 1731 + (void *) msg, msg->number, ch->partid, ch->number); 1732 + 1733 + *address_of_msg = msg; 1734 + 1735 + return xpcSuccess; 1736 + } 1737 + 1738 + 1739 + /* 1740 + * Allocate an entry for a message from the message queue associated with the 1741 + * specified channel. NOTE that this routine can sleep waiting for a message 1742 + * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. 1743 + * 1744 + * Arguments: 1745 + * 1746 + * partid - ID of partition to which the channel is connected. 1747 + * ch_number - channel #. 1748 + * flags - see xpc.h for valid flags. 1749 + * payload - address of the allocated payload area pointer (filled in on 1750 + * return) in which the user-defined message is constructed. 
1751 + */ 1752 + enum xpc_retval 1753 + xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) 1754 + { 1755 + struct xpc_partition *part = &xpc_partitions[partid]; 1756 + enum xpc_retval ret = xpcUnknownReason; 1757 + struct xpc_msg *msg; 1758 + 1759 + 1760 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1761 + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 1762 + 1763 + *payload = NULL; 1764 + 1765 + if (xpc_part_ref(part)) { 1766 + ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); 1767 + xpc_part_deref(part); 1768 + 1769 + if (msg != NULL) { 1770 + *payload = &msg->payload; 1771 + } 1772 + } 1773 + 1774 + return ret; 1775 + } 1776 + 1777 + 1778 + /* 1779 + * Now we actually send the messages that are ready to be sent by advancing 1780 + * the local message queue's Put value and then send an IPI to the recipient 1781 + * partition. 1782 + */ 1783 + static void 1784 + xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) 1785 + { 1786 + struct xpc_msg *msg; 1787 + s64 put = initial_put + 1; 1788 + int send_IPI = 0; 1789 + 1790 + 1791 + while (1) { 1792 + 1793 + while (1) { 1794 + if (put == (volatile s64) ch->w_local_GP.put) { 1795 + break; 1796 + } 1797 + 1798 + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1799 + (put % ch->local_nentries) * ch->msg_size); 1800 + 1801 + if (!(msg->flags & XPC_M_READY)) { 1802 + break; 1803 + } 1804 + 1805 + put++; 1806 + } 1807 + 1808 + if (put == initial_put) { 1809 + /* nothing's changed */ 1810 + break; 1811 + } 1812 + 1813 + if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != 1814 + initial_put) { 1815 + /* someone else beat us to it */ 1816 + DBUG_ON((volatile s64) ch->local_GP->put < initial_put); 1817 + break; 1818 + } 1819 + 1820 + /* we just set the new value of local_GP->put */ 1821 + 1822 + dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " 1823 + "channel=%d\n", put, ch->partid, ch->number); 1824 + 1825 + send_IPI = 1; 1826 + 1827 + /* 
1828 + * We need to ensure that the message referenced by 1829 + * local_GP->put is not XPC_M_READY or that local_GP->put 1830 + * equals w_local_GP.put, so we'll go have a look. 1831 + */ 1832 + initial_put = put; 1833 + } 1834 + 1835 + if (send_IPI) { 1836 + xpc_IPI_send_msgrequest(ch); 1837 + } 1838 + } 1839 + 1840 + 1841 + /* 1842 + * Common code that does the actual sending of the message by advancing the 1843 + * local message queue's Put value and sends an IPI to the partition the 1844 + * message is being sent to. 1845 + */ 1846 + static enum xpc_retval 1847 + xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, 1848 + xpc_notify_func func, void *key) 1849 + { 1850 + enum xpc_retval ret = xpcSuccess; 1851 + struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!! 1852 + s64 put, msg_number = msg->number; 1853 + 1854 + 1855 + DBUG_ON(notify_type == XPC_N_CALL && func == NULL); 1856 + DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != 1857 + msg_number % ch->local_nentries); 1858 + DBUG_ON(msg->flags & XPC_M_READY); 1859 + 1860 + if (ch->flags & XPC_C_DISCONNECTING) { 1861 + /* drop the reference grabbed in xpc_allocate_msg() */ 1862 + xpc_msgqueue_deref(ch); 1863 + return ch->reason; 1864 + } 1865 + 1866 + if (notify_type != 0) { 1867 + /* 1868 + * Tell the remote side to send an ACK interrupt when the 1869 + * message has been delivered. 1870 + */ 1871 + msg->flags |= XPC_M_INTERRUPT; 1872 + 1873 + atomic_inc(&ch->n_to_notify); 1874 + 1875 + notify = &ch->notify_queue[msg_number % ch->local_nentries]; 1876 + notify->func = func; 1877 + notify->key = key; 1878 + (volatile u8) notify->type = notify_type; 1879 + 1880 + // >>> is a mb() needed here? 1881 + 1882 + if (ch->flags & XPC_C_DISCONNECTING) { 1883 + /* 1884 + * An error occurred between our last error check and 1885 + * this one. We will try to clear the type field from 1886 + * the notify entry. 
If we succeed then 1887 + * xpc_disconnect_channel() didn't already process 1888 + * the notify entry. 1889 + */ 1890 + if (cmpxchg(&notify->type, notify_type, 0) == 1891 + notify_type) { 1892 + atomic_dec(&ch->n_to_notify); 1893 + ret = ch->reason; 1894 + } 1895 + 1896 + /* drop the reference grabbed in xpc_allocate_msg() */ 1897 + xpc_msgqueue_deref(ch); 1898 + return ret; 1899 + } 1900 + } 1901 + 1902 + msg->flags |= XPC_M_READY; 1903 + 1904 + /* 1905 + * The preceding store of msg->flags must occur before the following 1906 + * load of ch->local_GP->put. 1907 + */ 1908 + mb(); 1909 + 1910 + /* see if the message is next in line to be sent, if so send it */ 1911 + 1912 + put = ch->local_GP->put; 1913 + if (put == msg_number) { 1914 + xpc_send_msgs(ch, put); 1915 + } 1916 + 1917 + /* drop the reference grabbed in xpc_allocate_msg() */ 1918 + xpc_msgqueue_deref(ch); 1919 + return ret; 1920 + } 1921 + 1922 + 1923 + /* 1924 + * Send a message previously allocated using xpc_initiate_allocate() on the 1925 + * specified channel connected to the specified partition. 1926 + * 1927 + * This routine will not wait for the message to be received, nor will 1928 + * notification be given when it does happen. Once this routine has returned 1929 + * the message entry allocated via xpc_initiate_allocate() is no longer 1930 + * accessable to the caller. 1931 + * 1932 + * This routine, although called by users, does not call xpc_part_ref() to 1933 + * ensure that the partition infrastructure is in place. It relies on the 1934 + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). 1935 + * 1936 + * Arguments: 1937 + * 1938 + * partid - ID of partition to which the channel is connected. 1939 + * ch_number - channel # to send message on. 1940 + * payload - pointer to the payload area allocated via 1941 + * xpc_initiate_allocate(). 
1942 + */ 1943 + enum xpc_retval 1944 + xpc_initiate_send(partid_t partid, int ch_number, void *payload) 1945 + { 1946 + struct xpc_partition *part = &xpc_partitions[partid]; 1947 + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1948 + enum xpc_retval ret; 1949 + 1950 + 1951 + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, 1952 + partid, ch_number); 1953 + 1954 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1955 + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 1956 + DBUG_ON(msg == NULL); 1957 + 1958 + ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); 1959 + 1960 + return ret; 1961 + } 1962 + 1963 + 1964 + /* 1965 + * Send a message previously allocated using xpc_initiate_allocate on the 1966 + * specified channel connected to the specified partition. 1967 + * 1968 + * This routine will not wait for the message to be sent. Once this routine 1969 + * has returned the message entry allocated via xpc_initiate_allocate() is no 1970 + * longer accessable to the caller. 1971 + * 1972 + * Once the remote end of the channel has received the message, the function 1973 + * passed as an argument to xpc_initiate_send_notify() will be called. This 1974 + * allows the sender to free up or re-use any buffers referenced by the 1975 + * message, but does NOT mean the message has been processed at the remote 1976 + * end by a receiver. 1977 + * 1978 + * If this routine returns an error, the caller's function will NOT be called. 1979 + * 1980 + * This routine, although called by users, does not call xpc_part_ref() to 1981 + * ensure that the partition infrastructure is in place. It relies on the 1982 + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). 1983 + * 1984 + * Arguments: 1985 + * 1986 + * partid - ID of partition to which the channel is connected. 1987 + * ch_number - channel # to send message on. 1988 + * payload - pointer to the payload area allocated via 1989 + * xpc_initiate_allocate(). 
1990 + * func - function to call with asynchronous notification of message 1991 + * receipt. THIS FUNCTION MUST BE NON-BLOCKING. 1992 + * key - user-defined key to be passed to the function when it's called. 1993 + */ 1994 + enum xpc_retval 1995 + xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, 1996 + xpc_notify_func func, void *key) 1997 + { 1998 + struct xpc_partition *part = &xpc_partitions[partid]; 1999 + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 2000 + enum xpc_retval ret; 2001 + 2002 + 2003 + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, 2004 + partid, ch_number); 2005 + 2006 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2007 + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 2008 + DBUG_ON(msg == NULL); 2009 + DBUG_ON(func == NULL); 2010 + 2011 + ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, 2012 + func, key); 2013 + return ret; 2014 + } 2015 + 2016 + 2017 + static struct xpc_msg * 2018 + xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) 2019 + { 2020 + struct xpc_partition *part = &xpc_partitions[ch->partid]; 2021 + struct xpc_msg *remote_msg, *msg; 2022 + u32 msg_index, nmsgs; 2023 + u64 msg_offset; 2024 + enum xpc_retval ret; 2025 + 2026 + 2027 + if (down_interruptible(&ch->msg_to_pull_sema) != 0) { 2028 + /* we were interrupted by a signal */ 2029 + return NULL; 2030 + } 2031 + 2032 + while (get >= ch->next_msg_to_pull) { 2033 + 2034 + /* pull as many messages as are ready and able to be pulled */ 2035 + 2036 + msg_index = ch->next_msg_to_pull % ch->remote_nentries; 2037 + 2038 + DBUG_ON(ch->next_msg_to_pull >= 2039 + (volatile s64) ch->w_remote_GP.put); 2040 + nmsgs = (volatile s64) ch->w_remote_GP.put - 2041 + ch->next_msg_to_pull; 2042 + if (msg_index + nmsgs > ch->remote_nentries) { 2043 + /* ignore the ones that wrap the msg queue for now */ 2044 + nmsgs = ch->remote_nentries - msg_index; 2045 + } 2046 + 2047 + msg_offset = msg_index * ch->msg_size; 2048 + msg 
= (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2049 + msg_offset); 2050 + remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + 2051 + msg_offset); 2052 + 2053 + if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, 2054 + nmsgs * ch->msg_size)) != xpcSuccess) { 2055 + 2056 + dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 2057 + " msg %ld from partition %d, channel=%d, " 2058 + "ret=%d\n", nmsgs, ch->next_msg_to_pull, 2059 + ch->partid, ch->number, ret); 2060 + 2061 + XPC_DEACTIVATE_PARTITION(part, ret); 2062 + 2063 + up(&ch->msg_to_pull_sema); 2064 + return NULL; 2065 + } 2066 + 2067 + mb(); /* >>> this may not be needed, we're not sure */ 2068 + 2069 + ch->next_msg_to_pull += nmsgs; 2070 + } 2071 + 2072 + up(&ch->msg_to_pull_sema); 2073 + 2074 + /* return the message we were looking for */ 2075 + msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2076 + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); 2077 + 2078 + return msg; 2079 + } 2080 + 2081 + 2082 + /* 2083 + * Get a message to be delivered. 2084 + */ 2085 + static struct xpc_msg * 2086 + xpc_get_deliverable_msg(struct xpc_channel *ch) 2087 + { 2088 + struct xpc_msg *msg = NULL; 2089 + s64 get; 2090 + 2091 + 2092 + do { 2093 + if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { 2094 + break; 2095 + } 2096 + 2097 + get = (volatile s64) ch->w_local_GP.get; 2098 + if (get == (volatile s64) ch->w_remote_GP.put) { 2099 + break; 2100 + } 2101 + 2102 + /* There are messages waiting to be pulled and delivered. 2103 + * We need to try to secure one for ourselves. We'll do this 2104 + * by trying to increment w_local_GP.get and hope that no one 2105 + * else beats us to it. If they do, we'll we'll simply have 2106 + * to try again for the next one. 
2107 + */ 2108 + 2109 + if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { 2110 + /* we got the entry referenced by get */ 2111 + 2112 + dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " 2113 + "partid=%d, channel=%d\n", get + 1, 2114 + ch->partid, ch->number); 2115 + 2116 + /* pull the message from the remote partition */ 2117 + 2118 + msg = xpc_pull_remote_msg(ch, get); 2119 + 2120 + DBUG_ON(msg != NULL && msg->number != get); 2121 + DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); 2122 + DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); 2123 + 2124 + break; 2125 + } 2126 + 2127 + } while (1); 2128 + 2129 + return msg; 2130 + } 2131 + 2132 + 2133 + /* 2134 + * Deliver a message to its intended recipient. 2135 + */ 2136 + void 2137 + xpc_deliver_msg(struct xpc_channel *ch) 2138 + { 2139 + struct xpc_msg *msg; 2140 + 2141 + 2142 + if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { 2143 + 2144 + /* 2145 + * This ref is taken to protect the payload itself from being 2146 + * freed before the user is finished with it, which the user 2147 + * indicates by calling xpc_initiate_received(). 
2148 + */ 2149 + xpc_msgqueue_ref(ch); 2150 + 2151 + atomic_inc(&ch->kthreads_active); 2152 + 2153 + if (ch->func != NULL) { 2154 + dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " 2155 + "msg_number=%ld, partid=%d, channel=%d\n", 2156 + (void *) msg, msg->number, ch->partid, 2157 + ch->number); 2158 + 2159 + /* deliver the message to its intended recipient */ 2160 + ch->func(xpcMsgReceived, ch->partid, ch->number, 2161 + &msg->payload, ch->key); 2162 + 2163 + dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " 2164 + "msg_number=%ld, partid=%d, channel=%d\n", 2165 + (void *) msg, msg->number, ch->partid, 2166 + ch->number); 2167 + } 2168 + 2169 + atomic_dec(&ch->kthreads_active); 2170 + } 2171 + } 2172 + 2173 + 2174 + /* 2175 + * Now we actually acknowledge the messages that have been delivered and ack'd 2176 + * by advancing the cached remote message queue's Get value and if requested 2177 + * send an IPI to the message sender's partition. 2178 + */ 2179 + static void 2180 + xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) 2181 + { 2182 + struct xpc_msg *msg; 2183 + s64 get = initial_get + 1; 2184 + int send_IPI = 0; 2185 + 2186 + 2187 + while (1) { 2188 + 2189 + while (1) { 2190 + if (get == (volatile s64) ch->w_local_GP.get) { 2191 + break; 2192 + } 2193 + 2194 + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2195 + (get % ch->remote_nentries) * ch->msg_size); 2196 + 2197 + if (!(msg->flags & XPC_M_DONE)) { 2198 + break; 2199 + } 2200 + 2201 + msg_flags |= msg->flags; 2202 + get++; 2203 + } 2204 + 2205 + if (get == initial_get) { 2206 + /* nothing's changed */ 2207 + break; 2208 + } 2209 + 2210 + if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != 2211 + initial_get) { 2212 + /* someone else beat us to it */ 2213 + DBUG_ON((volatile s64) ch->local_GP->get <= 2214 + initial_get); 2215 + break; 2216 + } 2217 + 2218 + /* we just set the new value of local_GP->get */ 2219 + 2220 + dev_dbg(xpc_chan, "local_GP->get changed to 
%ld, partid=%d, " 2221 + "channel=%d\n", get, ch->partid, ch->number); 2222 + 2223 + send_IPI = (msg_flags & XPC_M_INTERRUPT); 2224 + 2225 + /* 2226 + * We need to ensure that the message referenced by 2227 + * local_GP->get is not XPC_M_DONE or that local_GP->get 2228 + * equals w_local_GP.get, so we'll go have a look. 2229 + */ 2230 + initial_get = get; 2231 + } 2232 + 2233 + if (send_IPI) { 2234 + xpc_IPI_send_msgrequest(ch); 2235 + } 2236 + } 2237 + 2238 + 2239 + /* 2240 + * Acknowledge receipt of a delivered message. 2241 + * 2242 + * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition 2243 + * that sent the message. 2244 + * 2245 + * This function, although called by users, does not call xpc_part_ref() to 2246 + * ensure that the partition infrastructure is in place. It relies on the 2247 + * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). 2248 + * 2249 + * Arguments: 2250 + * 2251 + * partid - ID of partition to which the channel is connected. 2252 + * ch_number - channel # message received on. 2253 + * payload - pointer to the payload area allocated via 2254 + * xpc_initiate_allocate(). 
2255 + */ 2256 + void 2257 + xpc_initiate_received(partid_t partid, int ch_number, void *payload) 2258 + { 2259 + struct xpc_partition *part = &xpc_partitions[partid]; 2260 + struct xpc_channel *ch; 2261 + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 2262 + s64 get, msg_number = msg->number; 2263 + 2264 + 2265 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2266 + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 2267 + 2268 + ch = &part->channels[ch_number]; 2269 + 2270 + dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2271 + (void *) msg, msg_number, ch->partid, ch->number); 2272 + 2273 + DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != 2274 + msg_number % ch->remote_nentries); 2275 + DBUG_ON(msg->flags & XPC_M_DONE); 2276 + 2277 + msg->flags |= XPC_M_DONE; 2278 + 2279 + /* 2280 + * The preceding store of msg->flags must occur before the following 2281 + * load of ch->local_GP->get. 2282 + */ 2283 + mb(); 2284 + 2285 + /* 2286 + * See if this message is next in line to be acknowledged as having 2287 + * been delivered. 2288 + */ 2289 + get = ch->local_GP->get; 2290 + if (get == msg_number) { 2291 + xpc_acknowledge_msgs(ch, get, msg->flags); 2292 + } 2293 + 2294 + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ 2295 + xpc_msgqueue_deref(ch); 2296 + } 2297 +
+1064
arch/ia64/sn/kernel/xpc_main.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 7 + */ 8 + 9 + 10 + /* 11 + * Cross Partition Communication (XPC) support - standard version. 12 + * 13 + * XPC provides a message passing capability that crosses partition 14 + * boundaries. This module is made up of two parts: 15 + * 16 + * partition This part detects the presence/absence of other 17 + * partitions. It provides a heartbeat and monitors 18 + * the heartbeats of other partitions. 19 + * 20 + * channel This part manages the channels and sends/receives 21 + * messages across them to/from other partitions. 22 + * 23 + * There are a couple of additional functions residing in XP, which 24 + * provide an interface to XPC for its users. 25 + * 26 + * 27 + * Caveats: 28 + * 29 + * . We currently have no way to determine which nasid an IPI came 30 + * from. Thus, xpc_IPI_send() does a remote AMO write followed by 31 + * an IPI. The AMO indicates where data is to be pulled from, so 32 + * after the IPI arrives, the remote partition checks the AMO word. 33 + * The IPI can actually arrive before the AMO however, so other code 34 + * must periodically check for this case. Also, remote AMO operations 35 + * do not reliably time out. Thus we do a remote PIO read solely to 36 + * know whether the remote partition is down and whether we should 37 + * stop sending IPIs to it. This remote PIO read operation is set up 38 + * in a special nofault region so SAL knows to ignore (and cleanup) 39 + * any errors due to the remote AMO write, PIO read, and/or PIO 40 + * write operations. 41 + * 42 + * If/when new hardware solves this IPI problem, we should abandon 43 + * the current approach. 
44 + * 45 + */ 46 + 47 + 48 + #include <linux/kernel.h> 49 + #include <linux/module.h> 50 + #include <linux/init.h> 51 + #include <linux/sched.h> 52 + #include <linux/syscalls.h> 53 + #include <linux/cache.h> 54 + #include <linux/interrupt.h> 55 + #include <linux/slab.h> 56 + #include <asm/sn/intr.h> 57 + #include <asm/sn/sn_sal.h> 58 + #include <asm/uaccess.h> 59 + #include "xpc.h" 60 + 61 + 62 + /* define two XPC debug device structures to be used with dev_dbg() et al */ 63 + 64 + struct device_driver xpc_dbg_name = { 65 + .name = "xpc" 66 + }; 67 + 68 + struct device xpc_part_dbg_subname = { 69 + .bus_id = {0}, /* set to "part" at xpc_init() time */ 70 + .driver = &xpc_dbg_name 71 + }; 72 + 73 + struct device xpc_chan_dbg_subname = { 74 + .bus_id = {0}, /* set to "chan" at xpc_init() time */ 75 + .driver = &xpc_dbg_name 76 + }; 77 + 78 + struct device *xpc_part = &xpc_part_dbg_subname; 79 + struct device *xpc_chan = &xpc_chan_dbg_subname; 80 + 81 + 82 + /* systune related variables for /proc/sys directories */ 83 + 84 + static int xpc_hb_min = 1; 85 + static int xpc_hb_max = 10; 86 + 87 + static int xpc_hb_check_min = 10; 88 + static int xpc_hb_check_max = 120; 89 + 90 + static ctl_table xpc_sys_xpc_hb_dir[] = { 91 + { 92 + 1, 93 + "hb_interval", 94 + &xpc_hb_interval, 95 + sizeof(int), 96 + 0644, 97 + NULL, 98 + &proc_dointvec_minmax, 99 + &sysctl_intvec, 100 + NULL, 101 + &xpc_hb_min, &xpc_hb_max 102 + }, 103 + { 104 + 2, 105 + "hb_check_interval", 106 + &xpc_hb_check_interval, 107 + sizeof(int), 108 + 0644, 109 + NULL, 110 + &proc_dointvec_minmax, 111 + &sysctl_intvec, 112 + NULL, 113 + &xpc_hb_check_min, &xpc_hb_check_max 114 + }, 115 + {0} 116 + }; 117 + static ctl_table xpc_sys_xpc_dir[] = { 118 + { 119 + 1, 120 + "hb", 121 + NULL, 122 + 0, 123 + 0555, 124 + xpc_sys_xpc_hb_dir 125 + }, 126 + {0} 127 + }; 128 + static ctl_table xpc_sys_dir[] = { 129 + { 130 + 1, 131 + "xpc", 132 + NULL, 133 + 0, 134 + 0555, 135 + xpc_sys_xpc_dir 136 + }, 137 + {0} 138 + }; 
139 + static struct ctl_table_header *xpc_sysctl; 140 + 141 + 142 + /* #of IRQs received */ 143 + static atomic_t xpc_act_IRQ_rcvd; 144 + 145 + /* IRQ handler notifies this wait queue on receipt of an IRQ */ 146 + static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); 147 + 148 + static unsigned long xpc_hb_check_timeout; 149 + 150 + /* xpc_hb_checker thread exited notification */ 151 + static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); 152 + 153 + /* xpc_discovery thread exited notification */ 154 + static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); 155 + 156 + 157 + static struct timer_list xpc_hb_timer; 158 + 159 + 160 + static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); 161 + 162 + 163 + /* 164 + * Notify the heartbeat check thread that an IRQ has been received. 165 + */ 166 + static irqreturn_t 167 + xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) 168 + { 169 + atomic_inc(&xpc_act_IRQ_rcvd); 170 + wake_up_interruptible(&xpc_act_IRQ_wq); 171 + return IRQ_HANDLED; 172 + } 173 + 174 + 175 + /* 176 + * Timer to produce the heartbeat. The timer structures function is 177 + * already set when this is initially called. A tunable is used to 178 + * specify when the next timeout should occur. 179 + */ 180 + static void 181 + xpc_hb_beater(unsigned long dummy) 182 + { 183 + xpc_vars->heartbeat++; 184 + 185 + if (jiffies >= xpc_hb_check_timeout) { 186 + wake_up_interruptible(&xpc_act_IRQ_wq); 187 + } 188 + 189 + xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); 190 + add_timer(&xpc_hb_timer); 191 + } 192 + 193 + 194 + /* 195 + * This thread is responsible for nearly all of the partition 196 + * activation/deactivation. 
197 + */ 198 + static int 199 + xpc_hb_checker(void *ignore) 200 + { 201 + int last_IRQ_count = 0; 202 + int new_IRQ_count; 203 + int force_IRQ=0; 204 + 205 + 206 + /* this thread was marked active by xpc_hb_init() */ 207 + 208 + daemonize(XPC_HB_CHECK_THREAD_NAME); 209 + 210 + set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 211 + 212 + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 213 + 214 + while (!(volatile int) xpc_exiting) { 215 + 216 + /* wait for IRQ or timeout */ 217 + (void) wait_event_interruptible(xpc_act_IRQ_wq, 218 + (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || 219 + jiffies >= xpc_hb_check_timeout || 220 + (volatile int) xpc_exiting)); 221 + 222 + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " 223 + "been received\n", 224 + (int) (xpc_hb_check_timeout - jiffies), 225 + atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); 226 + 227 + 228 + /* checking of remote heartbeats is skewed by IRQ handling */ 229 + if (jiffies >= xpc_hb_check_timeout) { 230 + dev_dbg(xpc_part, "checking remote heartbeats\n"); 231 + xpc_check_remote_hb(); 232 + 233 + /* 234 + * We need to periodically recheck to ensure no 235 + * IPI/AMO pairs have been missed. That check 236 + * must always reset xpc_hb_check_timeout. 
237 + */ 238 + force_IRQ = 1; 239 + } 240 + 241 + 242 + new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); 243 + if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { 244 + force_IRQ = 0; 245 + 246 + dev_dbg(xpc_part, "found an IRQ to process; will be " 247 + "resetting xpc_hb_check_timeout\n"); 248 + 249 + last_IRQ_count += xpc_identify_act_IRQ_sender(); 250 + if (last_IRQ_count < new_IRQ_count) { 251 + /* retry once to help avoid missing AMO */ 252 + (void) xpc_identify_act_IRQ_sender(); 253 + } 254 + last_IRQ_count = new_IRQ_count; 255 + 256 + xpc_hb_check_timeout = jiffies + 257 + (xpc_hb_check_interval * HZ); 258 + } 259 + } 260 + 261 + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 262 + 263 + 264 + /* mark this thread as inactive */ 265 + up(&xpc_hb_checker_exited); 266 + return 0; 267 + } 268 + 269 + 270 + /* 271 + * This thread will attempt to discover other partitions to activate 272 + * based on info provided by SAL. This new thread is short lived and 273 + * will exit once discovery is complete. 274 + */ 275 + static int 276 + xpc_initiate_discovery(void *ignore) 277 + { 278 + daemonize(XPC_DISCOVERY_THREAD_NAME); 279 + 280 + xpc_discovery(); 281 + 282 + dev_dbg(xpc_part, "discovery thread is exiting\n"); 283 + 284 + /* mark this thread as inactive */ 285 + up(&xpc_discovery_exited); 286 + return 0; 287 + } 288 + 289 + 290 + /* 291 + * Establish first contact with the remote partititon. This involves pulling 292 + * the XPC per partition variables from the remote partition and waiting for 293 + * the remote partition to pull ours. 
294 + */ 295 + static enum xpc_retval 296 + xpc_make_first_contact(struct xpc_partition *part) 297 + { 298 + enum xpc_retval ret; 299 + 300 + 301 + while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { 302 + if (ret != xpcRetry) { 303 + XPC_DEACTIVATE_PARTITION(part, ret); 304 + return ret; 305 + } 306 + 307 + dev_dbg(xpc_chan, "waiting to make first contact with " 308 + "partition %d\n", XPC_PARTID(part)); 309 + 310 + /* wait a 1/4 of a second or so */ 311 + set_current_state(TASK_INTERRUPTIBLE); 312 + (void) schedule_timeout(0.25 * HZ); 313 + 314 + if (part->act_state == XPC_P_DEACTIVATING) { 315 + return part->reason; 316 + } 317 + } 318 + 319 + return xpc_mark_partition_active(part); 320 + } 321 + 322 + 323 + /* 324 + * The first kthread assigned to a newly activated partition is the one 325 + * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to 326 + * that kthread until the partition is brought down, at which time that kthread 327 + * returns back to XPC HB. (The return of that kthread will signify to XPC HB 328 + * that XPC has dismantled all communication infrastructure for the associated 329 + * partition.) This kthread becomes the channel manager for that partition. 330 + * 331 + * Each active partition has a channel manager, who, besides connecting and 332 + * disconnecting channels, will ensure that each of the partition's connected 333 + * channels has the required number of assigned kthreads to get the work done. 334 + */ 335 + static void 336 + xpc_channel_mgr(struct xpc_partition *part) 337 + { 338 + while (part->act_state != XPC_P_DEACTIVATING || 339 + atomic_read(&part->nchannels_active) > 0) { 340 + 341 + xpc_process_channel_activity(part); 342 + 343 + 344 + /* 345 + * Wait until we've been requested to activate kthreads or 346 + * all of the channel's message queues have been torn down or 347 + * a signal is pending. 
348 + * 349 + * The channel_mgr_requests is set to 1 after being awakened, 350 + * This is done to prevent the channel mgr from making one pass 351 + * through the loop for each request, since he will 352 + * be servicing all the requests in one pass. The reason it's 353 + * set to 1 instead of 0 is so that other kthreads will know 354 + * that the channel mgr is running and won't bother trying to 355 + * wake him up. 356 + */ 357 + atomic_dec(&part->channel_mgr_requests); 358 + (void) wait_event_interruptible(part->channel_mgr_wq, 359 + (atomic_read(&part->channel_mgr_requests) > 0 || 360 + (volatile u64) part->local_IPI_amo != 0 || 361 + ((volatile u8) part->act_state == 362 + XPC_P_DEACTIVATING && 363 + atomic_read(&part->nchannels_active) == 0))); 364 + atomic_set(&part->channel_mgr_requests, 1); 365 + 366 + // >>> Does it need to wakeup periodically as well? In case we 367 + // >>> miscalculated the #of kthreads to wakeup or create? 368 + } 369 + } 370 + 371 + 372 + /* 373 + * When XPC HB determines that a partition has come up, it will create a new 374 + * kthread and that kthread will call this function to attempt to set up the 375 + * basic infrastructure used for Cross Partition Communication with the newly 376 + * upped partition. 377 + * 378 + * The kthread that was created by XPC HB and which setup the XPC 379 + * infrastructure will remain assigned to the partition until the partition 380 + * goes down. At which time the kthread will teardown the XPC infrastructure 381 + * and then exit. 382 + * 383 + * XPC HB will put the remote partition's XPC per partition specific variables 384 + * physical address into xpc_partitions[partid].remote_vars_part_pa prior to 385 + * calling xpc_partition_up(). 
386 + */ 387 + static void 388 + xpc_partition_up(struct xpc_partition *part) 389 + { 390 + DBUG_ON(part->channels != NULL); 391 + 392 + dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); 393 + 394 + if (xpc_setup_infrastructure(part) != xpcSuccess) { 395 + return; 396 + } 397 + 398 + /* 399 + * The kthread that XPC HB called us with will become the 400 + * channel manager for this partition. It will not return 401 + * back to XPC HB until the partition's XPC infrastructure 402 + * has been dismantled. 403 + */ 404 + 405 + (void) xpc_part_ref(part); /* this will always succeed */ 406 + 407 + if (xpc_make_first_contact(part) == xpcSuccess) { 408 + xpc_channel_mgr(part); 409 + } 410 + 411 + xpc_part_deref(part); 412 + 413 + xpc_teardown_infrastructure(part); 414 + } 415 + 416 + 417 + static int 418 + xpc_activating(void *__partid) 419 + { 420 + partid_t partid = (u64) __partid; 421 + struct xpc_partition *part = &xpc_partitions[partid]; 422 + unsigned long irq_flags; 423 + struct sched_param param = { sched_priority: MAX_USER_RT_PRIO - 1 }; 424 + int ret; 425 + 426 + 427 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 428 + 429 + spin_lock_irqsave(&part->act_lock, irq_flags); 430 + 431 + if (part->act_state == XPC_P_DEACTIVATING) { 432 + part->act_state = XPC_P_INACTIVE; 433 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 434 + part->remote_rp_pa = 0; 435 + return 0; 436 + } 437 + 438 + /* indicate the thread is activating */ 439 + DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); 440 + part->act_state = XPC_P_ACTIVATING; 441 + 442 + XPC_SET_REASON(part, 0, 0); 443 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 444 + 445 + dev_dbg(xpc_part, "bringing partition %d up\n", partid); 446 + 447 + daemonize("xpc%02d", partid); 448 + 449 + /* 450 + * This thread needs to run at a realtime priority to prevent a 451 + * significant performance degradation. 
452 + */ 453 + ret = sched_setscheduler(current, SCHED_FIFO, &param); 454 + if (ret != 0) { 455 + dev_warn(xpc_part, "unable to set pid %d to a realtime " 456 + "priority, ret=%d\n", current->pid, ret); 457 + } 458 + 459 + /* allow this thread and its children to run on any CPU */ 460 + set_cpus_allowed(current, CPU_MASK_ALL); 461 + 462 + /* 463 + * Register the remote partition's AMOs with SAL so it can handle 464 + * and cleanup errors within that address range should the remote 465 + * partition go down. We don't unregister this range because it is 466 + * difficult to tell when outstanding writes to the remote partition 467 + * are finished and thus when it is safe to unregister. This should 468 + * not result in wasted space in the SAL xp_addr_region table because 469 + * we should get the same page for remote_amos_page_pa after module 470 + * reloads and system reboots. 471 + */ 472 + if (sn_register_xp_addr_region(part->remote_amos_page_pa, 473 + PAGE_SIZE, 1) < 0) { 474 + dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " 475 + "xp_addr region\n", partid); 476 + 477 + spin_lock_irqsave(&part->act_lock, irq_flags); 478 + part->act_state = XPC_P_INACTIVE; 479 + XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); 480 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 481 + part->remote_rp_pa = 0; 482 + return 0; 483 + } 484 + 485 + XPC_ALLOW_HB(partid, xpc_vars); 486 + xpc_IPI_send_activated(part); 487 + 488 + 489 + /* 490 + * xpc_partition_up() holds this thread and marks this partition as 491 + * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 
492 + */ 493 + (void) xpc_partition_up(part); 494 + 495 + xpc_mark_partition_inactive(part); 496 + 497 + if (part->reason == xpcReactivating) { 498 + /* interrupting ourselves results in activating partition */ 499 + xpc_IPI_send_reactivate(part); 500 + } 501 + 502 + return 0; 503 + } 504 + 505 + 506 + void 507 + xpc_activate_partition(struct xpc_partition *part) 508 + { 509 + partid_t partid = XPC_PARTID(part); 510 + unsigned long irq_flags; 511 + pid_t pid; 512 + 513 + 514 + spin_lock_irqsave(&part->act_lock, irq_flags); 515 + 516 + pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); 517 + 518 + DBUG_ON(part->act_state != XPC_P_INACTIVE); 519 + 520 + if (pid > 0) { 521 + part->act_state = XPC_P_ACTIVATION_REQ; 522 + XPC_SET_REASON(part, xpcCloneKThread, __LINE__); 523 + } else { 524 + XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); 525 + } 526 + 527 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 528 + } 529 + 530 + 531 + /* 532 + * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified 533 + * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more 534 + * than one partition, we use an AMO_t structure per partition to indicate 535 + * whether a partition has sent an IPI or not. >>> If it has, then wake up the 536 + * associated kthread to handle it. 537 + * 538 + * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC 539 + * running on other partitions. 540 + * 541 + * Noteworthy Arguments: 542 + * 543 + * irq - Interrupt ReQuest number. NOT USED. 544 + * 545 + * dev_id - partid of IPI's potential sender. 546 + * 547 + * regs - processor's context before the processor entered 548 + * interrupt code. NOT USED. 
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	partid_t partid = (partid_t) (u64) dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];


	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

	/* only look at the IPI amo if the partition is still referenceable */
	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}


/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		/* re-arm ourselves to check again a short while from now */
		part->dropped_IPI_timer.expires = jiffies +
							XPC_P_DROPPED_IPI_WAIT;
		add_timer(&part->dropped_IPI_timer);
		xpc_part_deref(part);
	}
}


/*
 * Ensure that `needed' kthreads are available to deliver messages on the
 * given channel: wake up to `needed' idle ones first, then create new ones
 * for the remainder, bounded by the channel's kthreads_assigned_limit.
 */
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;


	DBUG_ON(needed <= 0);

	if (idle > 0) {
		/* satisfy as much of the need as possible with idle threads */
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0) {
		return;
	}

	/* clamp creation so we never exceed the assigned limit */
	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		// >>>should never be less than 0
		if (needed <= 0) {
			return;
		}
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed);
}


/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while ((volatile s64) ch->w_local_GP.get <
				(volatile s64) ch->w_remote_GP.put &&
					!((volatile u32) ch->flags &
						XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		/* bail out rather than accumulate too many idle threads */
		if (atomic_inc_return(&ch->kthreads_idle) >
						ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		/* sleep until there is work or the channel is going away */
		(void) wait_event_interruptible_exclusive(ch->idle_wq,
				((volatile s64) ch->w_local_GP.get <
					(volatile s64) ch->w_remote_GP.put ||
				((volatile u32) ch->flags &
						XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
}


/*
 * Body of each channel kthread created by xpc_create_kthreads().  Makes the
 * connected callout (first thread only), delivers messages until the channel
 * disconnects, then makes the disconnected callout (last thread out) and
 * drops the references taken on its behalf by xpc_create_kthreads().
 */
static int
xpc_daemonize_kthread(void *args)
{
	partid_t partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;


	daemonize("xpc%02dc%d", partid, ch_number);

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));

		/* let registerer know that connection has been established */

		if (atomic_read(&ch->kthreads_assigned) == 1) {
			xpc_connected_callout(ch);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
			if (n_needed > 0 &&
					!(ch->flags & XPC_C_DISCONNECTING)) {
				xpc_activate_kthreads(ch, n_needed);
			}
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* last thread out makes the disconnected callout where appropriate */
	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			((ch->flags & XPC_C_CONNECTCALLOUT) ||
				(ch->reason != xpcUnregistering &&
					ch->reason != xpcOtherUnregistering))) {
		xpc_disconnected_callout(ch);
	}


	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}


/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
730 + * 731 + * Additional kthreads are created and destroyed by XPC as the workload 732 + * demands. 733 + * 734 + * A kthread is assigned to one of the active channels that exists for a given 735 + * partition. 736 + */ 737 + void 738 + xpc_create_kthreads(struct xpc_channel *ch, int needed) 739 + { 740 + unsigned long irq_flags; 741 + pid_t pid; 742 + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); 743 + 744 + 745 + while (needed-- > 0) { 746 + pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 747 + if (pid < 0) { 748 + /* the fork failed */ 749 + 750 + if (atomic_read(&ch->kthreads_assigned) < 751 + ch->kthreads_idle_limit) { 752 + /* 753 + * Flag this as an error only if we have an 754 + * insufficient #of kthreads for the channel 755 + * to function. 756 + * 757 + * No xpc_msgqueue_ref() is needed here since 758 + * the channel mgr is doing this. 759 + */ 760 + spin_lock_irqsave(&ch->lock, irq_flags); 761 + XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 762 + &irq_flags); 763 + spin_unlock_irqrestore(&ch->lock, irq_flags); 764 + } 765 + break; 766 + } 767 + 768 + /* 769 + * The following is done on behalf of the newly created 770 + * kthread. That kthread is responsible for doing the 771 + * counterpart to the following before it exits. 772 + */ 773 + (void) xpc_part_ref(&xpc_partitions[ch->partid]); 774 + xpc_msgqueue_ref(ch); 775 + atomic_inc(&ch->kthreads_assigned); 776 + ch->kthreads_created++; // >>> temporary debug only!!! 
777 + } 778 + } 779 + 780 + 781 + void 782 + xpc_disconnect_wait(int ch_number) 783 + { 784 + partid_t partid; 785 + struct xpc_partition *part; 786 + struct xpc_channel *ch; 787 + 788 + 789 + /* now wait for all callouts to the caller's function to cease */ 790 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 791 + part = &xpc_partitions[partid]; 792 + 793 + if (xpc_part_ref(part)) { 794 + ch = &part->channels[ch_number]; 795 + 796 + // >>> how do we keep from falling into the window between our check and going 797 + // >>> down and coming back up where sema is re-inited? 798 + if (ch->flags & XPC_C_SETUP) { 799 + (void) down(&ch->teardown_sema); 800 + } 801 + 802 + xpc_part_deref(part); 803 + } 804 + } 805 + } 806 + 807 + 808 + static void 809 + xpc_do_exit(void) 810 + { 811 + partid_t partid; 812 + int active_part_count; 813 + struct xpc_partition *part; 814 + 815 + 816 + /* now it's time to eliminate our heartbeat */ 817 + del_timer_sync(&xpc_hb_timer); 818 + xpc_vars->heartbeating_to_mask = 0; 819 + 820 + /* indicate to others that our reserved page is uninitialized */ 821 + xpc_rsvd_page->vars_pa = 0; 822 + 823 + /* 824 + * Ignore all incoming interrupts. Without interupts the heartbeat 825 + * checker won't activate any new partitions that may come up. 826 + */ 827 + free_irq(SGI_XPC_ACTIVATE, NULL); 828 + 829 + /* 830 + * Cause the heartbeat checker and the discovery threads to exit. 831 + * We don't want them attempting to activate new partitions as we 832 + * try to deactivate the existing ones. 
833 + */ 834 + xpc_exiting = 1; 835 + wake_up_interruptible(&xpc_act_IRQ_wq); 836 + 837 + /* wait for the heartbeat checker thread to mark itself inactive */ 838 + down(&xpc_hb_checker_exited); 839 + 840 + /* wait for the discovery thread to mark itself inactive */ 841 + down(&xpc_discovery_exited); 842 + 843 + 844 + set_current_state(TASK_INTERRUPTIBLE); 845 + schedule_timeout(0.3 * HZ); 846 + set_current_state(TASK_RUNNING); 847 + 848 + 849 + /* wait for all partitions to become inactive */ 850 + 851 + do { 852 + active_part_count = 0; 853 + 854 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 855 + part = &xpc_partitions[partid]; 856 + if (part->act_state != XPC_P_INACTIVE) { 857 + active_part_count++; 858 + 859 + XPC_DEACTIVATE_PARTITION(part, xpcUnloading); 860 + } 861 + } 862 + 863 + if (active_part_count) { 864 + set_current_state(TASK_INTERRUPTIBLE); 865 + schedule_timeout(0.3 * HZ); 866 + set_current_state(TASK_RUNNING); 867 + } 868 + 869 + } while (active_part_count > 0); 870 + 871 + 872 + /* close down protections for IPI operations */ 873 + xpc_restrict_IPI_ops(); 874 + 875 + 876 + /* clear the interface to XPC's functions */ 877 + xpc_clear_interface(); 878 + 879 + if (xpc_sysctl) { 880 + unregister_sysctl_table(xpc_sysctl); 881 + } 882 + } 883 + 884 + 885 + int __init 886 + xpc_init(void) 887 + { 888 + int ret; 889 + partid_t partid; 890 + struct xpc_partition *part; 891 + pid_t pid; 892 + 893 + 894 + /* 895 + * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng 896 + * both a partition's reserved page and its XPC variables. Its size was 897 + * based on the size of a reserved page. So we need to ensure that the 898 + * XPC variables will fit as well. 
899 + */ 900 + if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) { 901 + dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); 902 + return -EPERM; 903 + } 904 + DBUG_ON((u64) xpc_remote_copy_buffer != 905 + L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); 906 + 907 + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); 908 + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); 909 + 910 + xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1); 911 + 912 + /* 913 + * The first few fields of each entry of xpc_partitions[] need to 914 + * be initialized now so that calls to xpc_connect() and 915 + * xpc_disconnect() can be made prior to the activation of any remote 916 + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE 917 + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING 918 + * PARTITION HAS BEEN ACTIVATED. 919 + */ 920 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 921 + part = &xpc_partitions[partid]; 922 + 923 + DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); 924 + 925 + part->act_IRQ_rcvd = 0; 926 + spin_lock_init(&part->act_lock); 927 + part->act_state = XPC_P_INACTIVE; 928 + XPC_SET_REASON(part, 0, 0); 929 + part->setup_state = XPC_P_UNSET; 930 + init_waitqueue_head(&part->teardown_wq); 931 + atomic_set(&part->references, 0); 932 + } 933 + 934 + /* 935 + * Open up protections for IPI operations (and AMO operations on 936 + * Shub 1.1 systems). 937 + */ 938 + xpc_allow_IPI_ops(); 939 + 940 + /* 941 + * Interrupts being processed will increment this atomic variable and 942 + * awaken the heartbeat thread which will process the interrupts. 943 + */ 944 + atomic_set(&xpc_act_IRQ_rcvd, 0); 945 + 946 + /* 947 + * This is safe to do before the xpc_hb_checker thread has started 948 + * because the handler releases a wait queue. If an interrupt is 949 + * received before the thread is waiting, it will not go to sleep, 950 + * but rather immediately process the interrupt. 
951 + */ 952 + ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, 953 + "xpc hb", NULL); 954 + if (ret != 0) { 955 + dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " 956 + "errno=%d\n", -ret); 957 + 958 + xpc_restrict_IPI_ops(); 959 + 960 + if (xpc_sysctl) { 961 + unregister_sysctl_table(xpc_sysctl); 962 + } 963 + return -EBUSY; 964 + } 965 + 966 + /* 967 + * Fill the partition reserved page with the information needed by 968 + * other partitions to discover we are alive and establish initial 969 + * communications. 970 + */ 971 + xpc_rsvd_page = xpc_rsvd_page_init(); 972 + if (xpc_rsvd_page == NULL) { 973 + dev_err(xpc_part, "could not setup our reserved page\n"); 974 + 975 + free_irq(SGI_XPC_ACTIVATE, NULL); 976 + xpc_restrict_IPI_ops(); 977 + 978 + if (xpc_sysctl) { 979 + unregister_sysctl_table(xpc_sysctl); 980 + } 981 + return -EBUSY; 982 + } 983 + 984 + 985 + /* 986 + * Set the beating to other partitions into motion. This is 987 + * the last requirement for other partitions' discovery to 988 + * initiate communications with us. 989 + */ 990 + init_timer(&xpc_hb_timer); 991 + xpc_hb_timer.function = xpc_hb_beater; 992 + xpc_hb_beater(0); 993 + 994 + 995 + /* 996 + * The real work-horse behind xpc. This processes incoming 997 + * interrupts and monitors remote heartbeats. 998 + */ 999 + pid = kernel_thread(xpc_hb_checker, NULL, 0); 1000 + if (pid < 0) { 1001 + dev_err(xpc_part, "failed while forking hb check thread\n"); 1002 + 1003 + /* indicate to others that our reserved page is uninitialized */ 1004 + xpc_rsvd_page->vars_pa = 0; 1005 + 1006 + del_timer_sync(&xpc_hb_timer); 1007 + free_irq(SGI_XPC_ACTIVATE, NULL); 1008 + xpc_restrict_IPI_ops(); 1009 + 1010 + if (xpc_sysctl) { 1011 + unregister_sysctl_table(xpc_sysctl); 1012 + } 1013 + return -EBUSY; 1014 + } 1015 + 1016 + 1017 + /* 1018 + * Startup a thread that will attempt to discover other partitions to 1019 + * activate based on info provided by SAL. 
This new thread is short 1020 + * lived and will exit once discovery is complete. 1021 + */ 1022 + pid = kernel_thread(xpc_initiate_discovery, NULL, 0); 1023 + if (pid < 0) { 1024 + dev_err(xpc_part, "failed while forking discovery thread\n"); 1025 + 1026 + /* mark this new thread as a non-starter */ 1027 + up(&xpc_discovery_exited); 1028 + 1029 + xpc_do_exit(); 1030 + return -EBUSY; 1031 + } 1032 + 1033 + 1034 + /* set the interface to point at XPC's functions */ 1035 + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, 1036 + xpc_initiate_allocate, xpc_initiate_send, 1037 + xpc_initiate_send_notify, xpc_initiate_received, 1038 + xpc_initiate_partid_to_nasids); 1039 + 1040 + return 0; 1041 + } 1042 + module_init(xpc_init); 1043 + 1044 + 1045 + void __exit 1046 + xpc_exit(void) 1047 + { 1048 + xpc_do_exit(); 1049 + } 1050 + module_exit(xpc_exit); 1051 + 1052 + 1053 + MODULE_AUTHOR("Silicon Graphics, Inc."); 1054 + MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); 1055 + MODULE_LICENSE("GPL"); 1056 + 1057 + module_param(xpc_hb_interval, int, 0); 1058 + MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " 1059 + "heartbeat increments."); 1060 + 1061 + module_param(xpc_hb_check_interval, int, 0); 1062 + MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1063 + "heartbeat checks."); 1064 +
+984
arch/ia64/sn/kernel/xpc_partition.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
 */


/*
 * Cross Partition Communication (XPC) partition support.
 *
 *	This is the part of XPC that detects the presence/absence of
 *	other partitions. It provides a heartbeat and monitors the
 *	heartbeats of other partitions.
 *
 */


#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/sn/bte.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include "xpc.h"


/* XPC is exiting flag */
int xpc_exiting;


/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;


/* original protection values for each node */
u64 xpc_prot_vec[MAX_COMPACT_NODES];


/* this partition's reserved page */
struct xpc_rsvd_page *xpc_rsvd_page;

/* this partition's XPC variables (within the reserved page) */
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;


/*
 * For performance reasons, each entry of xpc_partitions[] is cacheline
 * aligned. And xpc_partitions[] is padded with an additional entry at the
 * end so that the last legitimate entry doesn't share its cacheline with
 * another variable.
 */
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];


/*
 * Generic buffer used to store a local copy of the remote partitions
 * reserved page or XPC variables.
 *
 * xpc_discovery runs only once and is a seperate thread that is
 * very likely going to be processing in parallel with receiving
 * interrupts.
 */
char ____cacheline_aligned
		xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];


/* systune related variables */
int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT;


/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
{
	bte_result_t bte_res;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;


	/*
	 * SAL may require multiple passes; each pass it tells us where to
	 * BTE-copy the next piece from, until it returns a final status.
	 */
	while (1) {

		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
								&len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES) {
			break;
		}

		/* guard the copy below against overrunning our buffer */
		if (len > buf_size) {
			dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
			status = SALRET_ERROR;
			break;
		}

		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bte_res != BTE_SUCCESS) {
			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
			status = SALRET_ERROR;
			break;
		}
	}

	if (status != SALRET_OK) {
		rp_pa = 0;
	}
	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}


/*
 * Fill the partition reserved page with the
information needed by 135 + * other partitions to discover we are alive and establish initial 136 + * communications. 137 + */ 138 + struct xpc_rsvd_page * 139 + xpc_rsvd_page_init(void) 140 + { 141 + struct xpc_rsvd_page *rp; 142 + AMO_t *amos_page; 143 + u64 rp_pa, next_cl, nasid_array = 0; 144 + int i, ret; 145 + 146 + 147 + /* get the local reserved page's address */ 148 + 149 + rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0), 150 + (u64) xpc_remote_copy_buffer, 151 + XPC_RSVD_PAGE_ALIGNED_SIZE); 152 + if (rp_pa == 0) { 153 + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 154 + return NULL; 155 + } 156 + rp = (struct xpc_rsvd_page *) __va(rp_pa); 157 + 158 + if (rp->partid != sn_partition_id) { 159 + dev_err(xpc_part, "the reserved page's partid of %d should be " 160 + "%d\n", rp->partid, sn_partition_id); 161 + return NULL; 162 + } 163 + 164 + rp->version = XPC_RP_VERSION; 165 + 166 + /* 167 + * Place the XPC variables on the cache line following the 168 + * reserved page structure. 169 + */ 170 + next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE; 171 + xpc_vars = (struct xpc_vars *) next_cl; 172 + 173 + /* 174 + * Before clearing xpc_vars, see if a page of AMOs had been previously 175 + * allocated. If not we'll need to allocate one and set permissions 176 + * so that cross-partition AMOs are allowed. 177 + * 178 + * The allocated AMO page needs MCA reporting to remain disabled after 179 + * XPC has unloaded. To make this work, we keep a copy of the pointer 180 + * to this page (i.e., amos_page) in the struct xpc_vars structure, 181 + * which is pointed to by the reserved page, and re-use that saved copy 182 + * on subsequent loads of XPC. This AMO page is never freed, and its 183 + * memory protections are never restricted. 
184 + */ 185 + if ((amos_page = xpc_vars->amos_page) == NULL) { 186 + amos_page = (AMO_t *) mspec_kalloc_page(0); 187 + if (amos_page == NULL) { 188 + dev_err(xpc_part, "can't allocate page of AMOs\n"); 189 + return NULL; 190 + } 191 + 192 + /* 193 + * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems 194 + * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 195 + */ 196 + if (!enable_shub_wars_1_1()) { 197 + ret = sn_change_memprotect(ia64_tpa((u64) amos_page), 198 + PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, 199 + &nasid_array); 200 + if (ret != 0) { 201 + dev_err(xpc_part, "can't change memory " 202 + "protections\n"); 203 + mspec_kfree_page((unsigned long) amos_page); 204 + return NULL; 205 + } 206 + } 207 + } else if (!IS_AMO_ADDRESS((u64) amos_page)) { 208 + /* 209 + * EFI's XPBOOT can also set amos_page in the reserved page, 210 + * but it happens to leave it as an uncached physical address 211 + * and we need it to be an uncached virtual, so we'll have to 212 + * convert it. 213 + */ 214 + if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { 215 + dev_err(xpc_part, "previously used amos_page address " 216 + "is bad = 0x%p\n", (void *) amos_page); 217 + return NULL; 218 + } 219 + amos_page = (AMO_t *) TO_AMO((u64) amos_page); 220 + } 221 + 222 + memset(xpc_vars, 0, sizeof(struct xpc_vars)); 223 + 224 + /* 225 + * Place the XPC per partition specific variables on the cache line 226 + * following the XPC variables structure. 227 + */ 228 + next_cl += XPC_VARS_ALIGNED_SIZE; 229 + memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) * 230 + XP_MAX_PARTITIONS); 231 + xpc_vars_part = (struct xpc_vars_part *) next_cl; 232 + xpc_vars->vars_part_pa = __pa(next_cl); 233 + 234 + xpc_vars->version = XPC_V_VERSION; 235 + xpc_vars->act_nasid = cpuid_to_nasid(0); 236 + xpc_vars->act_phys_cpuid = cpu_physical_id(0); 237 + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 238 + 239 + 240 + /* 241 + * Initialize the activation related AMO variables. 
242 + */ 243 + xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS); 244 + for (i = 1; i < XP_NASID_MASK_WORDS; i++) { 245 + xpc_IPI_init(i + XP_MAX_PARTITIONS); 246 + } 247 + /* export AMO page's physical address to other partitions */ 248 + xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page); 249 + 250 + /* 251 + * This signifies to the remote partition that our reserved 252 + * page is initialized. 253 + */ 254 + (volatile u64) rp->vars_pa = __pa(xpc_vars); 255 + 256 + return rp; 257 + } 258 + 259 + 260 + /* 261 + * Change protections to allow IPI operations (and AMO operations on 262 + * Shub 1.1 systems). 263 + */ 264 + void 265 + xpc_allow_IPI_ops(void) 266 + { 267 + int node; 268 + int nasid; 269 + 270 + 271 + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 272 + 273 + if (is_shub2()) { 274 + xpc_sh2_IPI_access0 = 275 + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); 276 + xpc_sh2_IPI_access1 = 277 + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); 278 + xpc_sh2_IPI_access2 = 279 + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); 280 + xpc_sh2_IPI_access3 = 281 + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); 282 + 283 + for_each_online_node(node) { 284 + nasid = cnodeid_to_nasid(node); 285 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 286 + -1UL); 287 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 288 + -1UL); 289 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 290 + -1UL); 291 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 292 + -1UL); 293 + } 294 + 295 + } else { 296 + xpc_sh1_IPI_access = 297 + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); 298 + 299 + for_each_online_node(node) { 300 + nasid = cnodeid_to_nasid(node); 301 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 302 + -1UL); 303 + 304 + /* 305 + * Since the BIST collides with memory operations on 306 + * SHUB 1.1 sn_change_memprotect() cannot be used. 
307 + */ 308 + if (enable_shub_wars_1_1()) { 309 + /* open up everything */ 310 + xpc_prot_vec[node] = (u64) HUB_L((u64 *) 311 + GLOBAL_MMR_ADDR(nasid, 312 + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); 313 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 314 + SH1_MD_DQLP_MMR_DIR_PRIVEC0), 315 + -1UL); 316 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 317 + SH1_MD_DQRP_MMR_DIR_PRIVEC0), 318 + -1UL); 319 + } 320 + } 321 + } 322 + } 323 + 324 + 325 + /* 326 + * Restrict protections to disallow IPI operations (and AMO operations on 327 + * Shub 1.1 systems). 328 + */ 329 + void 330 + xpc_restrict_IPI_ops(void) 331 + { 332 + int node; 333 + int nasid; 334 + 335 + 336 + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 337 + 338 + if (is_shub2()) { 339 + 340 + for_each_online_node(node) { 341 + nasid = cnodeid_to_nasid(node); 342 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 343 + xpc_sh2_IPI_access0); 344 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 345 + xpc_sh2_IPI_access1); 346 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 347 + xpc_sh2_IPI_access2); 348 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 349 + xpc_sh2_IPI_access3); 350 + } 351 + 352 + } else { 353 + 354 + for_each_online_node(node) { 355 + nasid = cnodeid_to_nasid(node); 356 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 357 + xpc_sh1_IPI_access); 358 + 359 + if (enable_shub_wars_1_1()) { 360 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 361 + SH1_MD_DQLP_MMR_DIR_PRIVEC0), 362 + xpc_prot_vec[node]); 363 + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 364 + SH1_MD_DQRP_MMR_DIR_PRIVEC0), 365 + xpc_prot_vec[node]); 366 + } 367 + } 368 + } 369 + } 370 + 371 + 372 + /* 373 + * At periodic intervals, scan through all active partitions and ensure 374 + * their heartbeat is still active. If not, the partition is deactivated. 
375 + */ 376 + void 377 + xpc_check_remote_hb(void) 378 + { 379 + struct xpc_vars *remote_vars; 380 + struct xpc_partition *part; 381 + partid_t partid; 382 + bte_result_t bres; 383 + 384 + 385 + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 386 + 387 + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 388 + if (partid == sn_partition_id) { 389 + continue; 390 + } 391 + 392 + part = &xpc_partitions[partid]; 393 + 394 + if (part->act_state == XPC_P_INACTIVE || 395 + part->act_state == XPC_P_DEACTIVATING) { 396 + continue; 397 + } 398 + 399 + /* pull the remote_hb cache line */ 400 + bres = xp_bte_copy(part->remote_vars_pa, 401 + ia64_tpa((u64) remote_vars), 402 + XPC_VARS_ALIGNED_SIZE, 403 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 404 + if (bres != BTE_SUCCESS) { 405 + XPC_DEACTIVATE_PARTITION(part, 406 + xpc_map_bte_errors(bres)); 407 + continue; 408 + } 409 + 410 + dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat" 411 + " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid, 412 + remote_vars->heartbeat, part->last_heartbeat, 413 + remote_vars->kdb_status, 414 + remote_vars->heartbeating_to_mask); 415 + 416 + if (((remote_vars->heartbeat == part->last_heartbeat) && 417 + (remote_vars->kdb_status == 0)) || 418 + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { 419 + 420 + XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); 421 + continue; 422 + } 423 + 424 + part->last_heartbeat = remote_vars->heartbeat; 425 + } 426 + } 427 + 428 + 429 + /* 430 + * Get a copy of the remote partition's rsvd page. 431 + * 432 + * remote_rp points to a buffer that is cacheline aligned for BTE copies and 433 + * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE. 
434 + */ 435 + static enum xpc_retval 436 + xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 437 + struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa) 438 + { 439 + int bres, i; 440 + 441 + 442 + /* get the reserved page's physical address */ 443 + 444 + *remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp, 445 + XPC_RSVD_PAGE_ALIGNED_SIZE); 446 + if (*remote_rsvd_page_pa == 0) { 447 + return xpcNoRsvdPageAddr; 448 + } 449 + 450 + 451 + /* pull over the reserved page structure */ 452 + 453 + bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp), 454 + XPC_RSVD_PAGE_ALIGNED_SIZE, 455 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 456 + if (bres != BTE_SUCCESS) { 457 + return xpc_map_bte_errors(bres); 458 + } 459 + 460 + 461 + if (discovered_nasids != NULL) { 462 + for (i = 0; i < XP_NASID_MASK_WORDS; i++) { 463 + discovered_nasids[i] |= remote_rp->part_nasids[i]; 464 + } 465 + } 466 + 467 + 468 + /* check that the partid is for another partition */ 469 + 470 + if (remote_rp->partid < 1 || 471 + remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 472 + return xpcInvalidPartid; 473 + } 474 + 475 + if (remote_rp->partid == sn_partition_id) { 476 + return xpcLocalPartid; 477 + } 478 + 479 + 480 + if (XPC_VERSION_MAJOR(remote_rp->version) != 481 + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 482 + return xpcBadVersion; 483 + } 484 + 485 + return xpcSuccess; 486 + } 487 + 488 + 489 + /* 490 + * Get a copy of the remote partition's XPC variables. 491 + * 492 + * remote_vars points to a buffer that is cacheline aligned for BTE copies and 493 + * assumed to be of size XPC_VARS_ALIGNED_SIZE. 
494 + */ 495 + static enum xpc_retval 496 + xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) 497 + { 498 + int bres; 499 + 500 + 501 + if (remote_vars_pa == 0) { 502 + return xpcVarsNotSet; 503 + } 504 + 505 + 506 + /* pull over the cross partition variables */ 507 + 508 + bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars), 509 + XPC_VARS_ALIGNED_SIZE, 510 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 511 + if (bres != BTE_SUCCESS) { 512 + return xpc_map_bte_errors(bres); 513 + } 514 + 515 + if (XPC_VERSION_MAJOR(remote_vars->version) != 516 + XPC_VERSION_MAJOR(XPC_V_VERSION)) { 517 + return xpcBadVersion; 518 + } 519 + 520 + return xpcSuccess; 521 + } 522 + 523 + 524 + /* 525 + * Prior code has determined the nasid which generated an IPI. Inspect 526 + * that nasid to determine if its partition needs to be activated or 527 + * deactivated. 528 + * 529 + * A partition is considered "awaiting activation" if our partition 530 + * flags indicate it is not active and it has a heartbeat. A 531 + * partition is considered "awaiting deactivation" if our partition 532 + * flags indicate it is active but it has no heartbeat or it is not 533 + * sending its heartbeat to us. 534 + * 535 + * To determine the heartbeat, the remote nasid must have a properly 536 + * initialized reserved page.
537 + */ 538 + static void 539 + xpc_identify_act_IRQ_req(int nasid) 540 + { 541 + struct xpc_rsvd_page *remote_rp; 542 + struct xpc_vars *remote_vars; 543 + u64 remote_rsvd_page_pa; 544 + u64 remote_vars_pa; 545 + partid_t partid; 546 + struct xpc_partition *part; 547 + enum xpc_retval ret; 548 + 549 + 550 + /* pull over the reserved page structure */ 551 + 552 + remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; 553 + 554 + ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa); 555 + if (ret != xpcSuccess) { 556 + dev_warn(xpc_part, "unable to get reserved page from nasid %d, " 557 + "which sent interrupt, reason=%d\n", nasid, ret); 558 + return; 559 + } 560 + 561 + remote_vars_pa = remote_rp->vars_pa; 562 + partid = remote_rp->partid; 563 + part = &xpc_partitions[partid]; 564 + 565 + 566 + /* pull over the cross partition variables */ 567 + 568 + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 569 + 570 + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 571 + if (ret != xpcSuccess) { 572 + 573 + dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 574 + "which sent interrupt, reason=%d\n", nasid, ret); 575 + 576 + XPC_DEACTIVATE_PARTITION(part, ret); 577 + return; 578 + } 579 + 580 + 581 + part->act_IRQ_rcvd++; 582 + 583 + dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 584 + "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, 585 + remote_vars->heartbeat, remote_vars->heartbeating_to_mask); 586 + 587 + 588 + if (part->act_state == XPC_P_INACTIVE) { 589 + 590 + part->remote_rp_pa = remote_rsvd_page_pa; 591 + dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", 592 + part->remote_rp_pa); 593 + 594 + part->remote_vars_pa = remote_vars_pa; 595 + dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", 596 + part->remote_vars_pa); 597 + 598 + part->last_heartbeat = remote_vars->heartbeat; 599 + dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", 600 + part->last_heartbeat); 601 + 602 + 
part->remote_vars_part_pa = remote_vars->vars_part_pa; 603 + dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", 604 + part->remote_vars_part_pa); 605 + 606 + part->remote_act_nasid = remote_vars->act_nasid; 607 + dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", 608 + part->remote_act_nasid); 609 + 610 + part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; 611 + dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", 612 + part->remote_act_phys_cpuid); 613 + 614 + part->remote_amos_page_pa = remote_vars->amos_page_pa; 615 + dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", 616 + part->remote_amos_page_pa); 617 + 618 + xpc_activate_partition(part); 619 + 620 + } else if (part->remote_amos_page_pa != remote_vars->amos_page_pa || 621 + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { 622 + 623 + part->reactivate_nasid = nasid; 624 + XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 625 + } 626 + } 627 + 628 + 629 + /* 630 + * Loop through the activation AMO variables and process any bits 631 + * which are set. Each bit indicates a nasid sending a partition 632 + * activation or deactivation request. 633 + * 634 + * Return #of IRQs detected. 
635 + */ 636 + int 637 + xpc_identify_act_IRQ_sender(void) 638 + { 639 + int word, bit; 640 + u64 nasid_mask; 641 + u64 nasid; /* remote nasid */ 642 + int n_IRQs_detected = 0; 643 + AMO_t *act_amos; 644 + struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page; 645 + 646 + 647 + act_amos = xpc_vars->act_amos; 648 + 649 + 650 + /* scan through act AMO variable looking for non-zero entries */ 651 + for (word = 0; word < XP_NASID_MASK_WORDS; word++) { 652 + 653 + nasid_mask = xpc_IPI_receive(&act_amos[word]); 654 + if (nasid_mask == 0) { 655 + /* no IRQs from nasids in this variable */ 656 + continue; 657 + } 658 + 659 + dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, 660 + nasid_mask); 661 + 662 + 663 + /* 664 + * If this nasid has been added to the machine since 665 + * our partition was reset, this will retain the 666 + * remote nasid in our reserved pages machine mask. 667 + * This is used in the event of module reload. 668 + */ 669 + rp->mach_nasids[word] |= nasid_mask; 670 + 671 + 672 + /* locate the nasid(s) which sent interrupts */ 673 + 674 + for (bit = 0; bit < (8 * sizeof(u64)); bit++) { 675 + if (nasid_mask & (1UL << bit)) { 676 + n_IRQs_detected++; 677 + nasid = XPC_NASID_FROM_W_B(word, bit); 678 + dev_dbg(xpc_part, "interrupt from nasid %ld\n", 679 + nasid); 680 + xpc_identify_act_IRQ_req(nasid); 681 + } 682 + } 683 + } 684 + return n_IRQs_detected; 685 + } 686 + 687 + 688 + /* 689 + * Mark specified partition as active. 
690 + */ 691 + enum xpc_retval 692 + xpc_mark_partition_active(struct xpc_partition *part) 693 + { 694 + unsigned long irq_flags; 695 + enum xpc_retval ret; 696 + 697 + 698 + dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 699 + 700 + spin_lock_irqsave(&part->act_lock, irq_flags); 701 + if (part->act_state == XPC_P_ACTIVATING) { 702 + part->act_state = XPC_P_ACTIVE; 703 + ret = xpcSuccess; 704 + } else { 705 + DBUG_ON(part->reason == xpcSuccess); 706 + ret = part->reason; 707 + } 708 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 709 + 710 + return ret; 711 + } 712 + 713 + 714 + /* 715 + * Notify XPC that the partition is down. 716 + */ 717 + void 718 + xpc_deactivate_partition(const int line, struct xpc_partition *part, 719 + enum xpc_retval reason) 720 + { 721 + unsigned long irq_flags; 722 + partid_t partid = XPC_PARTID(part); 723 + 724 + 725 + spin_lock_irqsave(&part->act_lock, irq_flags); 726 + 727 + if (part->act_state == XPC_P_INACTIVE) { 728 + XPC_SET_REASON(part, reason, line); 729 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 730 + if (reason == xpcReactivating) { 731 + /* we interrupt ourselves to reactivate partition */ 732 + xpc_IPI_send_reactivate(part); 733 + } 734 + return; 735 + } 736 + if (part->act_state == XPC_P_DEACTIVATING) { 737 + if ((part->reason == xpcUnloading && reason != xpcUnloading) || 738 + reason == xpcReactivating) { 739 + XPC_SET_REASON(part, reason, line); 740 + } 741 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 742 + return; 743 + } 744 + 745 + part->act_state = XPC_P_DEACTIVATING; 746 + XPC_SET_REASON(part, reason, line); 747 + 748 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 749 + 750 + XPC_DISALLOW_HB(partid, xpc_vars); 751 + 752 + dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid, 753 + reason); 754 + 755 + xpc_partition_down(part, reason); 756 + } 757 + 758 + 759 + /* 760 + * Mark specified partition as inactive.
761 + */ 762 + void 763 + xpc_mark_partition_inactive(struct xpc_partition *part) 764 + { 765 + unsigned long irq_flags; 766 + 767 + 768 + dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", 769 + XPC_PARTID(part)); 770 + 771 + spin_lock_irqsave(&part->act_lock, irq_flags); 772 + part->act_state = XPC_P_INACTIVE; 773 + spin_unlock_irqrestore(&part->act_lock, irq_flags); 774 + part->remote_rp_pa = 0; 775 + } 776 + 777 + 778 + /* 779 + * SAL has provided a partition and machine mask. The partition mask 780 + * contains a bit for each even nasid in our partition. The machine 781 + * mask contains a bit for each even nasid in the entire machine. 782 + * 783 + * Using those two bit arrays, we can determine which nasids are 784 + * known in the machine. Each should also have a reserved page 785 + * initialized if they are available for partitioning. 786 + */ 787 + void 788 + xpc_discovery(void) 789 + { 790 + void *remote_rp_base; 791 + struct xpc_rsvd_page *remote_rp; 792 + struct xpc_vars *remote_vars; 793 + u64 remote_rsvd_page_pa; 794 + u64 remote_vars_pa; 795 + int region; 796 + int max_regions; 797 + int nasid; 798 + struct xpc_rsvd_page *rp; 799 + partid_t partid; 800 + struct xpc_partition *part; 801 + u64 *discovered_nasids; 802 + enum xpc_retval ret; 803 + 804 + 805 + remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE, 806 + GFP_KERNEL, &remote_rp_base); 807 + if (remote_rp == NULL) { 808 + return; 809 + } 810 + remote_vars = (struct xpc_vars *) remote_rp; 811 + 812 + 813 + discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS, 814 + GFP_KERNEL); 815 + if (discovered_nasids == NULL) { 816 + kfree(remote_rp_base); 817 + return; 818 + } 819 + memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS); 820 + 821 + rp = (struct xpc_rsvd_page *) xpc_rsvd_page; 822 + 823 + /* 824 + * The term 'region' in this context refers to the minimum number of 825 + * nodes that can comprise an access protection grouping. 
The access 826 + * protection is in regards to memory, IOI and IPI. 827 + */ 828 + //>>> move the next two #defines into either include/asm-ia64/sn/arch.h or 829 + //>>> include/asm-ia64/sn/addrs.h 830 + #define SH1_MAX_REGIONS 64 831 + #define SH2_MAX_REGIONS 256 832 + max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS; 833 + 834 + for (region = 0; region < max_regions; region++) { 835 + 836 + if ((volatile int) xpc_exiting) { 837 + break; 838 + } 839 + 840 + dev_dbg(xpc_part, "searching region %d\n", region); 841 + 842 + for (nasid = (region * sn_region_size * 2); 843 + nasid < ((region + 1) * sn_region_size * 2); 844 + nasid += 2) { 845 + 846 + if ((volatile int) xpc_exiting) { 847 + break; 848 + } 849 + 850 + dev_dbg(xpc_part, "checking nasid %d\n", nasid); 851 + 852 + 853 + if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) { 854 + dev_dbg(xpc_part, "PROM indicates Nasid %d is " 855 + "part of the local partition; skipping " 856 + "region\n", nasid); 857 + break; 858 + } 859 + 860 + if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) { 861 + dev_dbg(xpc_part, "PROM indicates Nasid %d was " 862 + "not on Numa-Link network at reset\n", 863 + nasid); 864 + continue; 865 + } 866 + 867 + if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { 868 + dev_dbg(xpc_part, "Nasid %d is part of a " 869 + "partition which was previously " 870 + "discovered\n", nasid); 871 + continue; 872 + } 873 + 874 + 875 + /* pull over the reserved page structure */ 876 + 877 + ret = xpc_get_remote_rp(nasid, discovered_nasids, 878 + remote_rp, &remote_rsvd_page_pa); 879 + if (ret != xpcSuccess) { 880 + dev_dbg(xpc_part, "unable to get reserved page " 881 + "from nasid %d, reason=%d\n", nasid, 882 + ret); 883 + 884 + if (ret == xpcLocalPartid) { 885 + break; 886 + } 887 + continue; 888 + } 889 + 890 + remote_vars_pa = remote_rp->vars_pa; 891 + 892 + partid = remote_rp->partid; 893 + part = &xpc_partitions[partid]; 894 + 895 + 896 + /* pull over the cross partition variables */ 897 + 898 
+ ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 899 + if (ret != xpcSuccess) { 900 + dev_dbg(xpc_part, "unable to get XPC variables " 901 + "from nasid %d, reason=%d\n", nasid, 902 + ret); 903 + 904 + XPC_DEACTIVATE_PARTITION(part, ret); 905 + continue; 906 + } 907 + 908 + if (part->act_state != XPC_P_INACTIVE) { 909 + dev_dbg(xpc_part, "partition %d on nasid %d is " 910 + "already activating\n", partid, nasid); 911 + break; 912 + } 913 + 914 + /* 915 + * Register the remote partition's AMOs with SAL so it 916 + * can handle and cleanup errors within that address 917 + * range should the remote partition go down. We don't 918 + * unregister this range because it is difficult to 919 + * tell when outstanding writes to the remote partition 920 + * are finished and thus when it is safe to 921 + * unregister. This should not result in wasted space 922 + * in the SAL xp_addr_region table because we should 923 + * get the same page for remote_act_amos_pa after 924 + * module reloads and system reboots. 925 + */ 926 + if (sn_register_xp_addr_region( 927 + remote_vars->amos_page_pa, 928 + PAGE_SIZE, 1) < 0) { 929 + dev_dbg(xpc_part, "partition %d failed to " 930 + "register xp_addr region 0x%016lx\n", 931 + partid, remote_vars->amos_page_pa); 932 + 933 + XPC_SET_REASON(part, xpcPhysAddrRegFailed, 934 + __LINE__); 935 + break; 936 + } 937 + 938 + /* 939 + * The remote nasid is valid and available. 940 + * Send an interrupt to that nasid to notify 941 + * it that we are ready to begin activation.
942 + */ 943 + dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, " 944 + "nasid %d, phys_cpuid 0x%x\n", 945 + remote_vars->amos_page_pa, 946 + remote_vars->act_nasid, 947 + remote_vars->act_phys_cpuid); 948 + 949 + xpc_IPI_send_activate(remote_vars); 950 + } 951 + } 952 + 953 + kfree(discovered_nasids); 954 + kfree(remote_rp_base); 955 + } 956 + 957 + 958 + /* 959 + * Given a partid, get the nasids owned by that partition from the 960 + * remote partition's reserved page. 961 + */ 962 + enum xpc_retval 963 + xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) 964 + { 965 + struct xpc_partition *part; 966 + u64 part_nasid_pa; 967 + int bte_res; 968 + 969 + 970 + part = &xpc_partitions[partid]; 971 + if (part->remote_rp_pa == 0) { 972 + return xpcPartitionDown; 973 + } 974 + 975 + part_nasid_pa = part->remote_rp_pa + 976 + (u64) &((struct xpc_rsvd_page *) 0)->part_nasids; 977 + 978 + bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), 979 + L1_CACHE_ALIGN(XP_NASID_MASK_BYTES), 980 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 981 + 982 + return xpc_map_bte_errors(bte_res); 983 + } 984 +
+715
arch/ia64/sn/kernel/xpnet.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. 7 + */ 8 + 9 + 10 + /* 11 + * Cross Partition Network Interface (XPNET) support 12 + * 13 + * XPNET provides a virtual network layered on top of the Cross 14 + * Partition communication layer. 15 + * 16 + * XPNET provides direct point-to-point and broadcast-like support 17 + * for an ethernet-like device. The ethernet broadcast medium is 18 + * replaced with a point-to-point message structure which passes 19 + * pointers to a DMA-capable block that a remote partition should 20 + * retrieve and pass to the upper level networking layer. 21 + * 22 + */ 23 + 24 + 25 + #include <linux/config.h> 26 + #include <linux/module.h> 27 + #include <linux/kernel.h> 28 + #include <linux/pci.h> 29 + #include <linux/init.h> 30 + #include <linux/ioport.h> 31 + #include <linux/netdevice.h> 32 + #include <linux/etherdevice.h> 33 + #include <linux/delay.h> 34 + #include <linux/ethtool.h> 35 + #include <linux/mii.h> 36 + #include <linux/smp.h> 37 + #include <linux/string.h> 38 + #include <asm/sn/bte.h> 39 + #include <asm/sn/io.h> 40 + #include <asm/sn/sn_sal.h> 41 + #include <asm/types.h> 42 + #include <asm/atomic.h> 43 + #include <asm/sn/xp.h> 44 + 45 + 46 + /* 47 + * The message payload transferred by XPC. 48 + * 49 + * buf_pa is the physical address where the DMA should pull from. 50 + * 51 + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a 52 + * cacheline boundary. To accomplish this, we record the number of 53 + * bytes from the beginning of the first cacheline to the first useful 54 + * byte of the skb (leadin_ignore) and the number of bytes from the 55 + * last useful byte of the skb to the end of the last cacheline 56 + * (tailout_ignore). 
57 + * 58 + * size is the number of bytes to transfer which includes the skb->len 59 + * (useful bytes of the sender's skb) plus the leadin and tailout 60 + */ 61 + struct xpnet_message { 62 + u16 version; /* Version for this message */ 63 + u16 embedded_bytes; /* #of bytes embedded in XPC message */ 64 + u32 magic; /* Special number indicating this is xpnet */ 65 + u64 buf_pa; /* phys address of buffer to retrieve */ 66 + u32 size; /* #of bytes in buffer */ 67 + u8 leadin_ignore; /* #of bytes to ignore at the beginning */ 68 + u8 tailout_ignore; /* #of bytes to ignore at the end */ 69 + unsigned char data; /* body of small packets */ 70 + }; 71 + 72 + /* 73 + * Determine the size of our message, the cacheline aligned size, 74 + * and then the number of messages we will request from XPC. 75 + * 76 + * XPC expects each message to exist in an individual cacheline. 77 + */ 78 + #define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) 79 + #define XPNET_MSG_DATA_MAX \ 80 + (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) 81 + #define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) 82 + #define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) 83 + 84 + 85 + #define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) 86 + #define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) 87 + 88 + /* 89 + * Version number of XPNET implementation. XPNET can always talk to versions 90 + * with same major #, and never talk to versions with a different major #.
91 + */ 92 + #define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor)) 93 + #define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) 94 + #define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) 95 + 96 + #define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ 97 + #define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ 98 + #define XPNET_MAGIC 0x88786984 /* "XNET" */ 99 + 100 + #define XPNET_VALID_MSG(_m) \ 101 + ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ 102 + && (msg->magic == XPNET_MAGIC)) 103 + 104 + #define XPNET_DEVICE_NAME "xp0" 105 + 106 + 107 + /* 108 + * When messages are queued with xpc_send_notify, a kmalloc'd buffer 109 + * of the following type is passed as a notification cookie. When the 110 + * notification function is called, we use the cookie to decide 111 + * whether all outstanding message sends have completed. The skb can 112 + * then be released. 113 + */ 114 + struct xpnet_pending_msg { 115 + struct list_head free_list; 116 + struct sk_buff *skb; 117 + atomic_t use_count; 118 + }; 119 + 120 + /* driver specific structure pointed to by the device structure */ 121 + struct xpnet_dev_private { 122 + struct net_device_stats stats; 123 + }; 124 + 125 + struct net_device *xpnet_device; 126 + 127 + /* 128 + * When we are notified of other partitions activating, we add them to 129 + * our bitmask of partitions to which we broadcast. 130 + */ 131 + static u64 xpnet_broadcast_partitions; 132 + /* protect above */ 133 + static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED; 134 + 135 + /* 136 + * Since the Block Transfer Engine (BTE) is being used for the transfer 137 + * and it relies upon cache-line size transfers, we need to reserve at 138 + * least one cache-line for head and tail alignment. The BTE is 139 + * limited to 8MB transfers. 
140 + * 141 + * Testing has shown that changing MTU to greater than 64KB has no effect 142 + * on TCP as the two sides negotiate a Max Segment Size that is limited 143 + * to 64K. Other protocols may use packets greater than this, but for 144 + * now, the default is 64KB. 145 + */ 146 + #define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES) 147 + /* 32KB has been determined to be the ideal */ 148 + #define XPNET_DEF_MTU (0x8000UL) 149 + 150 + 151 + /* 152 + * The partition id is encapsulated in the MAC address. The following 153 + * define locates the octet the partid is in. 154 + */ 155 + #define XPNET_PARTID_OCTET 1 156 + #define XPNET_LICENSE_OCTET 2 157 + 158 + 159 + /* 160 + * Define the XPNET debug device structure that is to be used with dev_dbg(), 161 + * dev_err(), dev_warn(), and dev_info(). 162 + */ 163 + struct device_driver xpnet_dbg_name = { 164 + .name = "xpnet" 165 + }; 166 + 167 + struct device xpnet_dbg_subname = { 168 + .bus_id = {0}, /* set to "" */ 169 + .driver = &xpnet_dbg_name 170 + }; 171 + 172 + struct device *xpnet = &xpnet_dbg_subname; 173 + 174 + /* 175 + * Packet was received by XPC and forwarded to us. 176 + */ 177 + static void 178 + xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) 179 + { 180 + struct sk_buff *skb; 181 + bte_result_t bret; 182 + struct xpnet_dev_private *priv = 183 + (struct xpnet_dev_private *) xpnet_device->priv; 184 + 185 + 186 + if (!XPNET_VALID_MSG(msg)) { 187 + /* 188 + * Packet with a different XPC version. Ignore.
189 + */ 190 + xpc_received(partid, channel, (void *) msg); 191 + 192 + priv->stats.rx_errors++; 193 + 194 + return; 195 + } 196 + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, 197 + msg->leadin_ignore, msg->tailout_ignore); 198 + 199 + 200 + /* reserve an extra cache line */ 201 + skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); 202 + if (!skb) { 203 + dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", 204 + msg->size + L1_CACHE_BYTES); 205 + 206 + xpc_received(partid, channel, (void *) msg); 207 + 208 + priv->stats.rx_errors++; 209 + 210 + return; 211 + } 212 + 213 + /* 214 + * The allocated skb has some reserved space. 215 + * In order to use bte_copy, we need to get the 216 + * skb->data pointer moved forward. 217 + */ 218 + skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & 219 + (L1_CACHE_BYTES - 1)) + 220 + msg->leadin_ignore)); 221 + 222 + /* 223 + * Update the tail pointer to indicate data actually 224 + * transferred. 225 + */ 226 + skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); 227 + 228 + /* 229 + * Move the data over from the other side. 230 + */ 231 + if ((XPNET_VERSION_MINOR(msg->version) == 1) && 232 + (msg->embedded_bytes != 0)) { 233 + dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " 234 + "%lu)\n", skb->data, &msg->data, 235 + (size_t) msg->embedded_bytes); 236 + 237 + memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes); 238 + } else { 239 + dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" 240 + "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, 241 + (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), 242 + msg->size); 243 + 244 + bret = bte_copy(msg->buf_pa, 245 + __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), 246 + msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 247 + 248 + if (bret != BTE_SUCCESS) { 249 + // >>> Need better way of cleaning skb. Currently skb 250 + // >>> appears in_use and we can't just call 251 + // >>> dev_kfree_skb.
252 + dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " 253 + "error=0x%x\n", (void *)msg->buf_pa, 254 + (void *)__pa((u64)skb->data & 255 + ~(L1_CACHE_BYTES - 1)), 256 + msg->size, bret); 257 + 258 + xpc_received(partid, channel, (void *) msg); 259 + 260 + priv->stats.rx_errors++; 261 + 262 + return; 263 + } 264 + } 265 + 266 + dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 267 + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 268 + (void *) skb->data, (void *) skb->tail, (void *) skb->end, 269 + skb->len); 270 + 271 + skb->dev = xpnet_device; 272 + skb->protocol = eth_type_trans(skb, xpnet_device); 273 + skb->ip_summed = CHECKSUM_UNNECESSARY; 274 + 275 + dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " 276 + "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", 277 + (void *) skb->head, (void *) skb->data, (void *) skb->tail, 278 + (void *) skb->end, skb->len); 279 + 280 + 281 + xpnet_device->last_rx = jiffies; 282 + priv->stats.rx_packets++; 283 + priv->stats.rx_bytes += skb->len + ETH_HLEN; 284 + 285 + netif_rx_ni(skb); 286 + xpc_received(partid, channel, (void *) msg); 287 + } 288 + 289 + 290 + /* 291 + * This is the handler which XPC calls during any sort of change in 292 + * state or message reception on a connection. 
293 + */ 294 + static void 295 + xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, 296 + void *data, void *key) 297 + { 298 + long bp; 299 + 300 + 301 + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 302 + DBUG_ON(channel != XPC_NET_CHANNEL); 303 + 304 + switch(reason) { 305 + case xpcMsgReceived: /* message received */ 306 + DBUG_ON(data == NULL); 307 + 308 + xpnet_receive(partid, channel, (struct xpnet_message *) data); 309 + break; 310 + 311 + case xpcConnected: /* connection completed to a partition */ 312 + spin_lock_bh(&xpnet_broadcast_lock); 313 + xpnet_broadcast_partitions |= 1UL << (partid -1 ); 314 + bp = xpnet_broadcast_partitions; 315 + spin_unlock_bh(&xpnet_broadcast_lock); 316 + 317 + netif_carrier_on(xpnet_device); 318 + 319 + dev_dbg(xpnet, "%s connection created to partition %d; " 320 + "xpnet_broadcast_partitions=0x%lx\n", 321 + xpnet_device->name, partid, bp); 322 + break; 323 + 324 + default: 325 + spin_lock_bh(&xpnet_broadcast_lock); 326 + xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); 327 + bp = xpnet_broadcast_partitions; 328 + spin_unlock_bh(&xpnet_broadcast_lock); 329 + 330 + if (bp == 0) { 331 + netif_carrier_off(xpnet_device); 332 + } 333 + 334 + dev_dbg(xpnet, "%s disconnected from partition %d; " 335 + "xpnet_broadcast_partitions=0x%lx\n", 336 + xpnet_device->name, partid, bp); 337 + break; 338 + 339 + } 340 + } 341 + 342 + 343 + static int 344 + xpnet_dev_open(struct net_device *dev) 345 + { 346 + enum xpc_retval ret; 347 + 348 + 349 + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, " 350 + "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity, 351 + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, 352 + XPNET_MAX_IDLE_KTHREADS); 353 + 354 + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, 355 + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, 356 + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); 357 + if (ret != xpcSuccess) { 358 + dev_err(xpnet, "ifconfig up of %s 
failed on XPC connect, " 359 + "ret=%d\n", dev->name, ret); 360 + 361 + return -ENOMEM; 362 + } 363 + 364 + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); 365 + 366 + return 0; 367 + } 368 + 369 + 370 + static int 371 + xpnet_dev_stop(struct net_device *dev) 372 + { 373 + xpc_disconnect(XPC_NET_CHANNEL); 374 + 375 + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); 376 + 377 + return 0; 378 + } 379 + 380 + 381 + static int 382 + xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) 383 + { 384 + /* 68 comes from min TCP+IP+MAC header */ 385 + if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { 386 + dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " 387 + "between 68 and %ld\n", dev->name, new_mtu, 388 + XPNET_MAX_MTU); 389 + return -EINVAL; 390 + } 391 + 392 + dev->mtu = new_mtu; 393 + dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu); 394 + return 0; 395 + } 396 + 397 + 398 + /* 399 + * Required for the net_device structure. 400 + */ 401 + static int 402 + xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) 403 + { 404 + return 0; 405 + } 406 + 407 + 408 + /* 409 + * Return statistics to the caller. 410 + */ 411 + static struct net_device_stats * 412 + xpnet_dev_get_stats(struct net_device *dev) 413 + { 414 + struct xpnet_dev_private *priv; 415 + 416 + 417 + priv = (struct xpnet_dev_private *) dev->priv; 418 + 419 + return &priv->stats; 420 + } 421 + 422 + 423 + /* 424 + * Notification that the other end has received the message and 425 + * DMA'd the skb information. At this point, they are done with 426 + * our side. When all recipients are done processing, we 427 + * release the skb and then release our pending message structure. 
428 + */ 429 + static void 430 + xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, 431 + void *__qm) 432 + { 433 + struct xpnet_pending_msg *queued_msg = 434 + (struct xpnet_pending_msg *) __qm; 435 + 436 + 437 + DBUG_ON(queued_msg == NULL); 438 + 439 + dev_dbg(xpnet, "message to %d notified with reason %d\n", 440 + partid, reason); 441 + 442 + if (atomic_dec_return(&queued_msg->use_count) == 0) { 443 + dev_dbg(xpnet, "all acks for skb->head=-x%p\n", 444 + (void *) queued_msg->skb->head); 445 + 446 + dev_kfree_skb_any(queued_msg->skb); 447 + kfree(queued_msg); 448 + } 449 + } 450 + 451 + 452 + /* 453 + * Network layer has formatted a packet (skb) and is ready to place it 454 + * "on the wire". Prepare and send an xpnet_message to all partitions 455 + * which have connected with us and are targets of this packet. 456 + * 457 + * MAC-NOTE: For the XPNET driver, the MAC address contains the 458 + * destination partition_id. If the destination partition id word 459 + * is 0xff, this packet is to broadcast to all partitions. 460 + */ 461 + static int 462 + xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 463 + { 464 + struct xpnet_pending_msg *queued_msg; 465 + enum xpc_retval ret; 466 + struct xpnet_message *msg; 467 + u64 start_addr, end_addr; 468 + long dp; 469 + u8 second_mac_octet; 470 + partid_t dest_partid; 471 + struct xpnet_dev_private *priv; 472 + u16 embedded_bytes; 473 + 474 + 475 + priv = (struct xpnet_dev_private *) dev->priv; 476 + 477 + 478 + dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 479 + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 480 + (void *) skb->data, (void *) skb->tail, (void *) skb->end, 481 + skb->len); 482 + 483 + 484 + /* 485 + * The xpnet_pending_msg tracks how many outstanding 486 + * xpc_send_notifies are relying on this skb. When none 487 + * remain, release the skb. 
488 + */ 489 + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); 490 + if (queued_msg == NULL) { 491 + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " 492 + "packet\n", sizeof(struct xpnet_pending_msg)); 493 + 494 + priv->stats.tx_errors++; 495 + 496 + return -ENOMEM; 497 + } 498 + 499 + 500 + /* get the beginning of the first cacheline and end of last */ 501 + start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); 502 + end_addr = L1_CACHE_ALIGN((u64) skb->tail); 503 + 504 + /* calculate how many bytes to embed in the XPC message */ 505 + embedded_bytes = 0; 506 + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { 507 + /* skb->data does fit so embed */ 508 + embedded_bytes = skb->len; 509 + } 510 + 511 + 512 + /* 513 + * Since the send occurs asynchronously, we set the count to one 514 + * and begin sending. Any sends that happen to complete before 515 + * we are done sending will not free the skb. We will be left 516 + * with that task during exit. This also handles the case of 517 + * a packet destined for a partition which is no longer up. 518 + */ 519 + atomic_set(&queued_msg->use_count, 1); 520 + queued_msg->skb = skb; 521 + 522 + 523 + second_mac_octet = skb->data[XPNET_PARTID_OCTET]; 524 + if (second_mac_octet == 0xff) { 525 + /* we are being asked to broadcast to all partitions */ 526 + dp = xpnet_broadcast_partitions; 527 + } else if (second_mac_octet != 0) { 528 + dp = xpnet_broadcast_partitions & 529 + (1UL << (second_mac_octet - 1)); 530 + } else { 531 + /* 0 is an invalid partid. Ignore */ 532 + dp = 0; 533 + } 534 + dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); 535 + 536 + /* 537 + * If we wanted to allow promiscuous mode to work like an 538 + * unswitched network, this would be a good point to OR in a 539 + * mask of partitions which should be receiving all packets. 540 + */ 541 + 542 + /* 543 + * Main send loop.
544 + */ 545 + for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; 546 + dest_partid++) { 547 + 548 + 549 + if (!(dp & (1UL << (dest_partid - 1)))) { 550 + /* not destined for this partition */ 551 + continue; 552 + } 553 + 554 + /* remove this partition from the destinations mask */ 555 + dp &= ~(1UL << (dest_partid - 1)); 556 + 557 + 558 + /* found a partition to send to */ 559 + 560 + ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 561 + XPC_NOWAIT, (void **)&msg); 562 + if (unlikely(ret != xpcSuccess)) { 563 + continue; 564 + } 565 + 566 + msg->embedded_bytes = embedded_bytes; 567 + if (unlikely(embedded_bytes != 0)) { 568 + msg->version = XPNET_VERSION_EMBED; 569 + dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", 570 + &msg->data, skb->data, (size_t) embedded_bytes); 571 + memcpy(&msg->data, skb->data, (size_t) embedded_bytes); 572 + } else { 573 + msg->version = XPNET_VERSION; 574 + } 575 + msg->magic = XPNET_MAGIC; 576 + msg->size = end_addr - start_addr; 577 + msg->leadin_ignore = (u64) skb->data - start_addr; 578 + msg->tailout_ignore = end_addr - (u64) skb->tail; 579 + msg->buf_pa = __pa(start_addr); 580 + 581 + dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" 582 + "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " 583 + "msg->tailout_ignore=%u\n", dest_partid, 584 + XPC_NET_CHANNEL, msg->buf_pa, msg->size, 585 + msg->leadin_ignore, msg->tailout_ignore); 586 + 587 + 588 + atomic_inc(&queued_msg->use_count); 589 + 590 + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, 591 + xpnet_send_completed, queued_msg); 592 + if (unlikely(ret != xpcSuccess)) { 593 + atomic_dec(&queued_msg->use_count); 594 + continue; 595 + } 596 + 597 + } 598 + 599 + if (atomic_dec_return(&queued_msg->use_count) == 0) { 600 + dev_dbg(xpnet, "no partitions to receive packet destined for " 601 + "%d\n", dest_partid); 602 + 603 + 604 + dev_kfree_skb(skb); 605 + kfree(queued_msg); 606 + } 607 + 608 + priv->stats.tx_packets++; 609 + priv->stats.tx_bytes += 
skb->len; 610 + 611 + return 0; 612 + } 613 + 614 + 615 + /* 616 + * Deal with transmit timeouts coming from the network layer. 617 + */ 618 + static void 619 + xpnet_dev_tx_timeout (struct net_device *dev) 620 + { 621 + struct xpnet_dev_private *priv; 622 + 623 + 624 + priv = (struct xpnet_dev_private *) dev->priv; 625 + 626 + priv->stats.tx_errors++; 627 + return; 628 + } 629 + 630 + 631 + static int __init 632 + xpnet_init(void) 633 + { 634 + int i; 635 + u32 license_num; 636 + int result = -ENOMEM; 637 + 638 + 639 + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); 640 + 641 + /* 642 + * use ether_setup() to init the majority of our device 643 + * structure and then override the necessary pieces. 644 + */ 645 + xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), 646 + XPNET_DEVICE_NAME, ether_setup); 647 + if (xpnet_device == NULL) { 648 + return -ENOMEM; 649 + } 650 + 651 + netif_carrier_off(xpnet_device); 652 + 653 + xpnet_device->mtu = XPNET_DEF_MTU; 654 + xpnet_device->change_mtu = xpnet_dev_change_mtu; 655 + xpnet_device->open = xpnet_dev_open; 656 + xpnet_device->get_stats = xpnet_dev_get_stats; 657 + xpnet_device->stop = xpnet_dev_stop; 658 + xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; 659 + xpnet_device->tx_timeout = xpnet_dev_tx_timeout; 660 + xpnet_device->set_config = xpnet_dev_set_config; 661 + 662 + /* 663 + * Multicast assumes the LSB of the first octet is set for multicast 664 + * MAC addresses. We chose the first octet of the MAC to be unlikely 665 + * to collide with any vendor's officially issued MAC. 666 + */ 667 + xpnet_device->dev_addr[0] = 0xfe; 668 + xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; 669 + license_num = sn_partition_serial_number_val(); 670 + for (i = 3; i >= 0; i--) { 671 + xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = 672 + license_num & 0xff; 673 + license_num = license_num >> 8; 674 + } 675 + 676 + /* 677 + * ether_setup() sets this to a multicast device. 
We are 678 + * really not supporting multicast at this time. 679 + */ 680 + xpnet_device->flags &= ~IFF_MULTICAST; 681 + 682 + /* 683 + * No need to checksum as it is a DMA transfer. The BTE will 684 + * report an error if the data is not retrievable and the 685 + * packet will be dropped. 686 + */ 687 + xpnet_device->features = NETIF_F_NO_CSUM; 688 + 689 + result = register_netdev(xpnet_device); 690 + if (result != 0) { 691 + free_netdev(xpnet_device); 692 + } 693 + 694 + return result; 695 + } 696 + module_init(xpnet_init); 697 + 698 + 699 + static void __exit 700 + xpnet_exit(void) 701 + { 702 + dev_info(xpnet, "unregistering network device %s\n", 703 + xpnet_device[0].name); 704 + 705 + unregister_netdev(xpnet_device); 706 + 707 + free_netdev(xpnet_device); 708 + } 709 + module_exit(xpnet_exit); 710 + 711 + 712 + MODULE_AUTHOR("Silicon Graphics, Inc."); 713 + MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); 714 + MODULE_LICENSE("GPL"); 715 +
+1 -1
arch/ia64/sn/pci/pcibr/pcibr_dma.c
··· 301 301 spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> 302 302 sfdl_flush_lock, flags); 303 303 304 - p->sfdl_flush_value = 0; 304 + *p->sfdl_flush_addr = 0; 305 305 306 306 /* force an interrupt. */ 307 307 *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+1 -1
arch/ia64/sn/pci/tioca_provider.c
··· 431 431 ca_dmamap->cad_dma_addr = bus_addr; 432 432 ca_dmamap->cad_gart_size = entries; 433 433 ca_dmamap->cad_gart_entry = entry; 434 - list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list); 434 + list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); 435 435 436 436 if (xio_addr % ps) { 437 437 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
+1 -1
drivers/char/Kconfig
··· 408 408 409 409 config SGI_MBCS 410 410 tristate "SGI FPGA Core Services driver support" 411 - depends on (IA64_SGI_SN2 || IA64_GENERIC) 411 + depends on SGI_TIOCX 412 412 help 413 413 If you have an SGI Altix with an attached SABrick 414 414 say Y or M here, otherwise say N.
+8
include/asm-ia64/sn/addrs.h
··· 136 136 */ 137 137 #define CAC_BASE (CACHED | AS_CAC_SPACE) 138 138 #define AMO_BASE (UNCACHED | AS_AMO_SPACE) 139 + #define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE) 139 140 #define GET_BASE (CACHED | AS_GET_SPACE) 140 141 141 142 /* ··· 159 158 */ 160 159 #define PHYS_TO_TIODMA(x) ( (((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x)) 161 160 #define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) 161 + 162 + 163 + /* 164 + * Macros to test for address type. 165 + */ 166 + #define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE) 167 + #define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE) 162 168 163 169 164 170 /*
+16 -1
include/asm-ia64/sn/arch.h
··· 5 5 * 6 6 * SGI specific setup. 7 7 * 8 - * Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved. 8 + * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved. 9 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 10 10 */ 11 11 #ifndef _ASM_IA64_SN_ARCH_H ··· 46 46 */ 47 47 #define MAX_COMPACT_NODES 2048 48 48 #define CPUS_PER_NODE 4 49 + 50 + 51 + /* 52 + * Compact node ID to nasid mappings kept in the per-cpu data areas of each 53 + * cpu. 54 + */ 55 + DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); 56 + #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) 57 + 58 + 59 + 60 + extern u8 sn_partition_id; 61 + extern u8 sn_system_size; 62 + extern u8 sn_sharing_domain_size; 63 + extern u8 sn_region_size; 49 64 50 65 extern void sn_flush_all_caches(long addr, long bytes); 51 66
-85
include/asm-ia64/sn/fetchop.h
··· 1 - /* 2 - * 3 - * This file is subject to the terms and conditions of the GNU General Public 4 - * License. See the file "COPYING" in the main directory of this archive 5 - * for more details. 6 - * 7 - * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. 8 - */ 9 - 10 - #ifndef _ASM_IA64_SN_FETCHOP_H 11 - #define _ASM_IA64_SN_FETCHOP_H 12 - 13 - #include <linux/config.h> 14 - 15 - #define FETCHOP_BASENAME "sgi_fetchop" 16 - #define FETCHOP_FULLNAME "/dev/sgi_fetchop" 17 - 18 - 19 - 20 - #define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */ 21 - 22 - #define FETCHOP_LOAD 0 23 - #define FETCHOP_INCREMENT 8 24 - #define FETCHOP_DECREMENT 16 25 - #define FETCHOP_CLEAR 24 26 - 27 - #define FETCHOP_STORE 0 28 - #define FETCHOP_AND 24 29 - #define FETCHOP_OR 32 30 - 31 - #define FETCHOP_CLEAR_CACHE 56 32 - 33 - #define FETCHOP_LOAD_OP(addr, op) ( \ 34 - *(volatile long *)((char*) (addr) + (op))) 35 - 36 - #define FETCHOP_STORE_OP(addr, op, x) ( \ 37 - *(volatile long *)((char*) (addr) + (op)) = (long) (x)) 38 - 39 - #ifdef __KERNEL__ 40 - 41 - /* 42 - * Convert a region 6 (kaddr) address to the address of the fetchop variable 43 - */ 44 - #define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr) 45 - 46 - 47 - /* 48 - * Each Atomic Memory Operation (AMO formerly known as fetchop) 49 - * variable is 64 bytes long. The first 8 bytes are used. The 50 - * remaining 56 bytes are unaddressable due to the operation taking 51 - * that portion of the address. 52 - * 53 - * NOTE: The AMO_t _MUST_ be placed in either the first or second half 54 - * of the cache line. The cache line _MUST NOT_ be used for anything 55 - * other than additional AMO_t entries. This is because there are two 56 - * addresses which reference the same physical cache line. One will 57 - * be a cached entry with the memory type bits all set. This address 58 - * may be loaded into processor cache. The AMO_t will be referenced 59 - * uncached via the memory special memory type. 
If any portion of the 60 - * cached cache-line is modified, when that line is flushed, it will 61 - * overwrite the uncached value in physical memory and lead to 62 - * inconsistency. 63 - */ 64 - typedef struct { 65 - u64 variable; 66 - u64 unused[7]; 67 - } AMO_t; 68 - 69 - 70 - /* 71 - * The following APIs are externalized to the kernel to allocate/free pages of 72 - * fetchop variables. 73 - * fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the 74 - * specified cnode. 75 - * fetchop_kfree_page - Free a previously allocated fetchop page 76 - */ 77 - 78 - unsigned long fetchop_kalloc_page(int nid); 79 - void fetchop_kfree_page(unsigned long maddr); 80 - 81 - 82 - #endif /* __KERNEL__ */ 83 - 84 - #endif /* _ASM_IA64_SN_FETCHOP_H */ 85 -
+2 -1
include/asm-ia64/sn/l1.h
··· 29 29 #define L1_BRICKTYPE_CHI_CG 0x76 /* v */ 30 30 #define L1_BRICKTYPE_X 0x78 /* x */ 31 31 #define L1_BRICKTYPE_X2 0x79 /* y */ 32 - #define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */ 32 + #define L1_BRICKTYPE_SA 0x5e /* ^ */ 33 33 #define L1_BRICKTYPE_PA 0x6a /* j */ 34 34 #define L1_BRICKTYPE_IA 0x6b /* k */ 35 + #define L1_BRICKTYPE_ATHENA 0x2b /* + */ 35 36 36 37 #endif /* _ASM_IA64_SN_L1_H */
+6 -9
include/asm-ia64/sn/nodepda.h
··· 13 13 #include <asm/irq.h> 14 14 #include <asm/sn/arch.h> 15 15 #include <asm/sn/intr.h> 16 - #include <asm/sn/pda.h> 17 16 #include <asm/sn/bte.h> 18 17 19 18 /* ··· 66 67 * The next set of definitions provides this. 67 68 * Routines are expected to use 68 69 * 69 - * nodepda -> to access node PDA for the node on which code is running 70 - * subnodepda -> to access subnode PDA for the subnode on which code is running 71 - * 72 - * NODEPDA(cnode) -> to access node PDA for cnodeid 73 - * SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode 70 + * sn_nodepda - to access node PDA for the node on which code is running 71 + * NODEPDA(cnodeid) - to access node PDA for cnodeid 74 72 */ 75 73 76 - #define nodepda pda->p_nodepda /* Ptr to this node's PDA */ 77 - #define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode]) 74 + DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); 75 + #define sn_nodepda (__get_cpu_var(__sn_nodepda)) 76 + #define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) 78 77 79 78 /* 80 79 * Check if given a compact node id the corresponding node has all the 81 80 * cpus disabled. 82 81 */ 83 - #define is_headless_node(cnode) (nr_cpus_node(cnode) == 0) 82 + #define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0) 84 83 85 84 #endif /* _ASM_IA64_SN_NODEPDA_H */
-9
include/asm-ia64/sn/pda.h
··· 24 24 25 25 typedef struct pda_s { 26 26 27 - /* Having a pointer in the begining of PDA tends to increase 28 - * the chance of having this pointer in cache. (Yes something 29 - * else gets pushed out). Doing this reduces the number of memory 30 - * access to all nodepda variables to be one 31 - */ 32 - struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */ 33 - struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */ 34 - 35 27 /* 36 28 * Support for SN LEDs 37 29 */ ··· 41 49 42 50 unsigned long sn_soft_irr[4]; 43 51 unsigned long sn_in_service_ivecs[4]; 44 - short cnodeid_to_nasid_table[MAX_NUMNODES]; 45 52 int sn_lb_int_war_ticks; 46 53 int sn_last_irq; 47 54 int sn_first_irq;
+24
include/asm-ia64/sn/shub_mmr.h
··· 385 385 #define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000 386 386 387 387 /* ==================================================================== */ 388 + /* Register "SH_IPI_ACCESS" */ 389 + /* CPU interrupt Access Permission Bits */ 390 + /* ==================================================================== */ 391 + 392 + #define SH1_IPI_ACCESS 0x0000000110060480 393 + #define SH2_IPI_ACCESS0 0x0000000010060c00 394 + #define SH2_IPI_ACCESS1 0x0000000010060c80 395 + #define SH2_IPI_ACCESS2 0x0000000010060d00 396 + #define SH2_IPI_ACCESS3 0x0000000010060d80 397 + 398 + /* ==================================================================== */ 388 399 /* Register "SH_INT_CMPB" */ 389 400 /* RTC Compare Value for Processor B */ 390 401 /* ==================================================================== */ ··· 440 429 #define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 441 430 #define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff 442 431 432 + /* ==================================================================== */ 433 + /* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */ 434 + /* privilege vector for acc=0 */ 435 + /* ==================================================================== */ 436 + 437 + #define SH1_MD_DQLP_MMR_DIR_PRIVEC0 0x0000000100030300 438 + 439 + /* ==================================================================== */ 440 + /* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */ 441 + /* privilege vector for acc=0 */ 442 + /* ==================================================================== */ 443 + 444 + #define SH1_MD_DQRP_MMR_DIR_PRIVEC0 0x0000000100050300 443 445 444 446 /* ==================================================================== */ 445 447 /* Some MMRs are functionally identical (or close enough) on both SHUB1 */
+1462 -1580
include/asm-ia64/sn/shubio.h
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 6 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. 7 7 */ 8 8 9 9 #ifndef _ASM_IA64_SN_SHUBIO_H 10 10 #define _ASM_IA64_SN_SHUBIO_H 11 11 12 - #define HUB_WIDGET_ID_MAX 0xf 13 - #define IIO_NUM_ITTES 7 14 - #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 12 + #define HUB_WIDGET_ID_MAX 0xf 13 + #define IIO_NUM_ITTES 7 14 + #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 15 15 16 - #define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ 17 - /* This register is also accessible from 18 - * Crosstalk at address 0x0. */ 19 - #define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ 20 - #define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ 21 - #define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ 22 - #define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ 23 - #define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ 24 - #define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ 25 - #define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ 26 - #define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ 27 - #define IIO_ILLR 0x00400130 /* IO LLP Log Register */ 28 - #define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ 16 + #define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ 17 + /* This register is also accessible from 18 + * Crosstalk at address 0x0. 
*/ 19 + #define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ 20 + #define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ 21 + #define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ 22 + #define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ 23 + #define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ 24 + #define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ 25 + #define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ 26 + #define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ 27 + #define IIO_ILLR 0x00400130 /* IO LLP Log Register */ 28 + #define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ 29 29 30 - #define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ 31 - #define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ 30 + #define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ 31 + #define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ 32 32 33 - #define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ 34 - #define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ 33 + #define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ 34 + #define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ 35 35 36 - #define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ 37 - #define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ 38 - #define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ 39 - #define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ 40 - #define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ 41 - #define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ 42 - #define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ 36 + #define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ 37 + #define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ 38 + #define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ 39 + #define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ 40 + 
#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ 41 + #define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ 42 + #define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ 43 43 44 - #define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ 45 - #define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ 46 - #define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ 47 - #define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ 48 - #define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ 49 - #define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ 50 - #define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ 51 - #define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ 52 - #define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ 44 + #define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ 45 + #define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ 46 + #define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ 47 + #define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ 48 + #define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ 49 + #define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ 50 + #define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ 51 + #define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ 52 + #define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ 53 53 54 - #define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ 55 - #define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ 56 - #define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ 57 - #define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ 58 - #define IIO_IBCR 0x00400200 /* IO BTE Control Register */ 54 + #define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ 55 + #define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ 56 + #define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ 57 + #define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ 58 + #define IIO_IBCR 0x00400200 /* IO BTE Control Register */ 59 59 60 - #define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ 61 - #define IIO_IXSS 0x00400210 /* IO 
Crosstalk Spurious Sideband */ 60 + #define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ 61 + #define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ 62 62 63 - #define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ 63 + #define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ 64 64 65 - #define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ 66 - #define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ 65 + #define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ 66 + #define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ 67 67 68 + #define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ 69 + #define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ 68 70 69 - #define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ 70 - #define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ 71 + #define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ 72 + #define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ 73 + #define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ 74 + #define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ 75 + #define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ 71 76 72 - #define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ 73 - #define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ 74 - #define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ 75 - #define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ 76 - #define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ 77 + #define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ 77 78 78 - #define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ 79 + #define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ 80 + #define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ 81 + #define IIO_IPRTE2_A 
0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ 82 + #define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ 83 + #define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ 84 + #define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ 85 + #define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */ 86 + #define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ 79 87 80 - #define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ 81 - #define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ 82 - #define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ 83 - #define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ 84 - #define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ 85 - #define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ 86 - #define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */ 87 - #define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ 88 + #define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ 89 + #define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ 90 + #define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ 91 + #define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ 92 + #define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ 93 + #define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ 94 + #define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ 95 + #define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ 88 96 89 - #define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ 90 - #define IIO_IPRTE1_B 0x00400350 /* IO PIO Read 
Address Table Entry 1, Part B */ 91 - #define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ 92 - #define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ 93 - #define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ 94 - #define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ 95 - #define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ 96 - #define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ 97 + #define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ 98 + #define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ 99 + #define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ 100 + #define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ 101 + #define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ 102 + #define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ 103 + #define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ 104 + #define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ 97 105 98 - #define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ 99 - #define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ 100 - #define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ 101 - #define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ 102 - #define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ 103 - #define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ 104 - #define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ 105 - #define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ 106 + #define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ 107 + #define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ 108 + #define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ 109 + #define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ 110 + #define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ 106 111 107 - #define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ 108 - #define 
IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ 109 - #define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ 110 - #define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ 111 - #define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ 112 + #define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ 113 + #define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ 114 + #define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ 115 + #define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ 116 + #define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ 112 117 113 - #define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ 114 - #define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ 115 - #define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ 116 - #define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ 117 - #define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ 118 + #define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ 119 + #define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ 120 + #define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ 121 + #define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ 122 + #define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ 118 123 119 - #define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ 120 - #define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ 121 - #define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ 122 - #define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ 123 - #define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ 124 + #define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ 125 + #define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ 126 + #define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ 127 + #define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ 128 + #define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ 124 129 125 - #define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ 126 - #define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ 127 - #define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ 128 - #define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ 129 - 
#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ 130 + #define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ 131 + #define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ 132 + #define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ 133 + #define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ 134 + #define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ 130 135 131 - #define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ 132 - #define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ 133 - #define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ 134 - #define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ 135 - #define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ 136 + #define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ 137 + #define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ 138 + #define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ 139 + #define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ 140 + #define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ 136 141 137 - #define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ 138 - #define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ 139 - #define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ 140 - #define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ 141 - #define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ 142 + #define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ 143 + #define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ 144 + #define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ 145 + #define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ 146 + #define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ 142 147 143 - #define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ 144 - #define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ 145 - #define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ 146 - #define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ 147 - #define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ 148 + #define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ 149 + #define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ 
150 + #define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ 151 + #define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ 152 + #define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ 148 153 149 - #define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ 150 - #define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ 151 - #define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ 152 - #define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ 153 - #define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ 154 + #define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ 155 + #define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ 156 + #define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ 157 + #define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ 158 + #define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ 154 159 155 - #define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ 156 - #define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ 157 - #define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ 158 - #define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ 159 - #define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ 160 + #define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ 161 + #define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ 162 + #define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ 163 + #define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ 164 + #define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ 160 165 161 - #define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ 162 - #define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ 163 - #define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ 164 - #define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ 165 - #define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ 166 + #define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ 167 + #define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ 168 + #define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ 169 + #define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ 170 + #define IIO_ICRBA_E 0x00400600 /* IO CRB Entry 
A_E */ 166 171 167 - #define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ 168 - #define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ 169 - #define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ 170 - #define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ 171 - #define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ 172 + #define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ 173 + #define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ 174 + #define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ 175 + #define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ 176 + #define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ 172 177 173 - #define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ 174 - #define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ 175 - #define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ 176 - #define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ 177 - #define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ 178 + #define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ 179 + #define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ 180 + #define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ 181 + #define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ 182 + #define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ 178 183 179 - #define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ 180 - #define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ 181 - #define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ 182 - #define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ 183 - #define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ 184 + #define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ 185 + #define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ 186 + #define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ 187 + #define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ 188 + #define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ 184 189 185 - #define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ 186 - #define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ 187 - #define IIO_ICRBD_C 0x00400680 /* 
IO CRB Entry D_C */ 188 - #define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ 189 - #define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ 190 + #define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ 191 + #define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ 192 + #define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ 193 + #define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ 194 + #define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ 190 195 191 - #define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ 192 - #define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ 193 - #define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ 194 - #define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ 195 - #define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ 196 + #define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ 197 + #define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ 198 + #define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ 196 199 197 - #define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ 198 - #define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ 199 - #define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ 200 + #define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ 200 201 201 - #define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ 202 + #define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ 203 + #define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ 204 + #define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ 205 + #define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ 206 + #define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ 207 + #define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ 208 + #define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ 209 + #define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ 210 + #define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ 211 + #define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ 212 + 
#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ 213 + #define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ 202 214 203 - #define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ 204 - #define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ 205 - #define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ 206 - #define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ 207 - #define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ 208 - #define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ 209 - #define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ 210 - #define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ 211 - #define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ 212 - #define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ 213 - #define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ 214 - #define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ 215 - 216 - #define IIO_IPCR 0x00430000 /* IO Performance Control */ 217 - #define IIO_IPPR 0x00430008 /* IO Performance Profiling */ 218 - 215 + #define IIO_IPCR 0x00430000 /* IO Performance Control */ 216 + #define IIO_IPPR 0x00430008 /* IO Performance Profiling */ 219 217 220 218 /************************************************************************ 221 - * * 219 + * * 222 220 * Description: This register echoes some information from the * 223 221 * LB_REV_ID register. It is available through Crosstalk as described * 224 222 * above. The REV_NUM and MFG_NUM fields receive their values from * 225 223 * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * 226 224 * The PART_NUM field's value is the Crosstalk device ID number that * 227 225 * Steve Miller assigned to the SHub chip. 
* 228 - * * 226 + * * 229 227 ************************************************************************/ 230 228 231 229 typedef union ii_wid_u { 232 - uint64_t ii_wid_regval; 233 - struct { 234 - uint64_t w_rsvd_1 : 1; 235 - uint64_t w_mfg_num : 11; 236 - uint64_t w_part_num : 16; 237 - uint64_t w_rev_num : 4; 238 - uint64_t w_rsvd : 32; 230 + uint64_t ii_wid_regval; 231 + struct { 232 + uint64_t w_rsvd_1:1; 233 + uint64_t w_mfg_num:11; 234 + uint64_t w_part_num:16; 235 + uint64_t w_rev_num:4; 236 + uint64_t w_rsvd:32; 239 237 } ii_wid_fld_s; 240 238 } ii_wid_u_t; 241 239 242 - 243 240 /************************************************************************ 244 - * * 241 + * * 245 242 * The fields in this register are set upon detection of an error * 246 243 * and cleared by various mechanisms, as explained in the * 247 244 * description. * 248 - * * 245 + * * 249 246 ************************************************************************/ 250 247 251 248 typedef union ii_wstat_u { 252 - uint64_t ii_wstat_regval; 253 - struct { 254 - uint64_t w_pending : 4; 255 - uint64_t w_xt_crd_to : 1; 256 - uint64_t w_xt_tail_to : 1; 257 - uint64_t w_rsvd_3 : 3; 258 - uint64_t w_tx_mx_rty : 1; 259 - uint64_t w_rsvd_2 : 6; 260 - uint64_t w_llp_tx_cnt : 8; 261 - uint64_t w_rsvd_1 : 8; 262 - uint64_t w_crazy : 1; 263 - uint64_t w_rsvd : 31; 249 + uint64_t ii_wstat_regval; 250 + struct { 251 + uint64_t w_pending:4; 252 + uint64_t w_xt_crd_to:1; 253 + uint64_t w_xt_tail_to:1; 254 + uint64_t w_rsvd_3:3; 255 + uint64_t w_tx_mx_rty:1; 256 + uint64_t w_rsvd_2:6; 257 + uint64_t w_llp_tx_cnt:8; 258 + uint64_t w_rsvd_1:8; 259 + uint64_t w_crazy:1; 260 + uint64_t w_rsvd:31; 264 261 } ii_wstat_fld_s; 265 262 } ii_wstat_u_t; 266 263 267 - 268 264 /************************************************************************ 269 - * * 265 + * * 270 266 * Description: This is a read-write enabled register. It controls * 271 267 * various aspects of the Crosstalk flow control. 
* 272 - * * 268 + * * 273 269 ************************************************************************/ 274 270 275 271 typedef union ii_wcr_u { 276 - uint64_t ii_wcr_regval; 277 - struct { 278 - uint64_t w_wid : 4; 279 - uint64_t w_tag : 1; 280 - uint64_t w_rsvd_1 : 8; 281 - uint64_t w_dst_crd : 3; 282 - uint64_t w_f_bad_pkt : 1; 283 - uint64_t w_dir_con : 1; 284 - uint64_t w_e_thresh : 5; 285 - uint64_t w_rsvd : 41; 272 + uint64_t ii_wcr_regval; 273 + struct { 274 + uint64_t w_wid:4; 275 + uint64_t w_tag:1; 276 + uint64_t w_rsvd_1:8; 277 + uint64_t w_dst_crd:3; 278 + uint64_t w_f_bad_pkt:1; 279 + uint64_t w_dir_con:1; 280 + uint64_t w_e_thresh:5; 281 + uint64_t w_rsvd:41; 286 282 } ii_wcr_fld_s; 287 283 } ii_wcr_u_t; 288 284 289 - 290 285 /************************************************************************ 291 - * * 286 + * * 292 287 * Description: This register's value is a bit vector that guards * 293 288 * access to local registers within the II as well as to external * 294 289 * Crosstalk widgets. Each bit in the register corresponds to a * ··· 306 311 * region ID bits are enabled in this same register. It can also be * 307 312 * accessed through the IAlias space by the local processors. * 308 313 * The reset value of this register allows access by all nodes. 
* 309 - * * 314 + * * 310 315 ************************************************************************/ 311 316 312 317 typedef union ii_ilapr_u { 313 - uint64_t ii_ilapr_regval; 314 - struct { 315 - uint64_t i_region : 64; 318 + uint64_t ii_ilapr_regval; 319 + struct { 320 + uint64_t i_region:64; 316 321 } ii_ilapr_fld_s; 317 322 } ii_ilapr_u_t; 318 323 319 - 320 - 321 - 322 324 /************************************************************************ 323 - * * 325 + * * 324 326 * Description: A write to this register of the 64-bit value * 325 327 * "SGIrules" in ASCII, will cause the bit in the ILAPR register * 326 328 * corresponding to the region of the requestor to be set (allow * ··· 326 334 * This register can also be accessed through the IAlias space. * 327 335 * However, this access will not change the access permissions in the * 328 336 * ILAPR. * 329 - * * 337 + * * 330 338 ************************************************************************/ 331 339 332 340 typedef union ii_ilapo_u { 333 - uint64_t ii_ilapo_regval; 334 - struct { 335 - uint64_t i_io_ovrride : 64; 341 + uint64_t ii_ilapo_regval; 342 + struct { 343 + uint64_t i_io_ovrride:64; 336 344 } ii_ilapo_fld_s; 337 345 } ii_ilapo_u_t; 338 346 339 - 340 - 341 347 /************************************************************************ 342 - * * 348 + * * 343 349 * This register qualifies all the PIO and Graphics writes launched * 344 350 * from the SHUB towards a widget. 
* 345 - * * 351 + * * 346 352 ************************************************************************/ 347 353 348 354 typedef union ii_iowa_u { 349 - uint64_t ii_iowa_regval; 350 - struct { 351 - uint64_t i_w0_oac : 1; 352 - uint64_t i_rsvd_1 : 7; 353 - uint64_t i_wx_oac : 8; 354 - uint64_t i_rsvd : 48; 355 + uint64_t ii_iowa_regval; 356 + struct { 357 + uint64_t i_w0_oac:1; 358 + uint64_t i_rsvd_1:7; 359 + uint64_t i_wx_oac:8; 360 + uint64_t i_rsvd:48; 355 361 } ii_iowa_fld_s; 356 362 } ii_iowa_u_t; 357 363 358 - 359 364 /************************************************************************ 360 - * * 365 + * * 361 366 * Description: This register qualifies all the requests launched * 362 367 * from a widget towards the Shub. This register is intended to be * 363 368 * used by software in case of misbehaving widgets. * 364 - * * 365 - * * 369 + * * 370 + * * 366 371 ************************************************************************/ 367 372 368 373 typedef union ii_iiwa_u { 369 - uint64_t ii_iiwa_regval; 370 - struct { 371 - uint64_t i_w0_iac : 1; 372 - uint64_t i_rsvd_1 : 7; 373 - uint64_t i_wx_iac : 8; 374 - uint64_t i_rsvd : 48; 374 + uint64_t ii_iiwa_regval; 375 + struct { 376 + uint64_t i_w0_iac:1; 377 + uint64_t i_rsvd_1:7; 378 + uint64_t i_wx_iac:8; 379 + uint64_t i_rsvd:48; 375 380 } ii_iiwa_fld_s; 376 381 } ii_iiwa_u_t; 377 382 378 - 379 - 380 383 /************************************************************************ 381 - * * 384 + * * 382 385 * Description: This register qualifies all the operations launched * 383 386 * from a widget towards the SHub. It allows individual access * 384 387 * control for up to 8 devices per widget. A device refers to * ··· 388 401 * The bits in this field are set by writing a 1 to them. Incoming * 389 402 * replies from Crosstalk are not subject to this access control * 390 403 * mechanism. 
* 391 - * * 404 + * * 392 405 ************************************************************************/ 393 406 394 407 typedef union ii_iidem_u { 395 - uint64_t ii_iidem_regval; 396 - struct { 397 - uint64_t i_w8_dxs : 8; 398 - uint64_t i_w9_dxs : 8; 399 - uint64_t i_wa_dxs : 8; 400 - uint64_t i_wb_dxs : 8; 401 - uint64_t i_wc_dxs : 8; 402 - uint64_t i_wd_dxs : 8; 403 - uint64_t i_we_dxs : 8; 404 - uint64_t i_wf_dxs : 8; 408 + uint64_t ii_iidem_regval; 409 + struct { 410 + uint64_t i_w8_dxs:8; 411 + uint64_t i_w9_dxs:8; 412 + uint64_t i_wa_dxs:8; 413 + uint64_t i_wb_dxs:8; 414 + uint64_t i_wc_dxs:8; 415 + uint64_t i_wd_dxs:8; 416 + uint64_t i_we_dxs:8; 417 + uint64_t i_wf_dxs:8; 405 418 } ii_iidem_fld_s; 406 419 } ii_iidem_u_t; 407 420 408 - 409 421 /************************************************************************ 410 - * * 422 + * * 411 423 * This register contains the various programmable fields necessary * 412 424 * for controlling and observing the LLP signals. * 413 - * * 425 + * * 414 426 ************************************************************************/ 415 427 416 428 typedef union ii_ilcsr_u { 417 - uint64_t ii_ilcsr_regval; 418 - struct { 419 - uint64_t i_nullto : 6; 420 - uint64_t i_rsvd_4 : 2; 421 - uint64_t i_wrmrst : 1; 422 - uint64_t i_rsvd_3 : 1; 423 - uint64_t i_llp_en : 1; 424 - uint64_t i_bm8 : 1; 425 - uint64_t i_llp_stat : 2; 426 - uint64_t i_remote_power : 1; 427 - uint64_t i_rsvd_2 : 1; 428 - uint64_t i_maxrtry : 10; 429 - uint64_t i_d_avail_sel : 2; 430 - uint64_t i_rsvd_1 : 4; 431 - uint64_t i_maxbrst : 10; 432 - uint64_t i_rsvd : 22; 429 + uint64_t ii_ilcsr_regval; 430 + struct { 431 + uint64_t i_nullto:6; 432 + uint64_t i_rsvd_4:2; 433 + uint64_t i_wrmrst:1; 434 + uint64_t i_rsvd_3:1; 435 + uint64_t i_llp_en:1; 436 + uint64_t i_bm8:1; 437 + uint64_t i_llp_stat:2; 438 + uint64_t i_remote_power:1; 439 + uint64_t i_rsvd_2:1; 440 + uint64_t i_maxrtry:10; 441 + uint64_t i_d_avail_sel:2; 442 + uint64_t i_rsvd_1:4; 443 + uint64_t 
i_maxbrst:10; 444 + uint64_t i_rsvd:22; 433 445 434 446 } ii_ilcsr_fld_s; 435 447 } ii_ilcsr_u_t; 436 448 437 - 438 449 /************************************************************************ 439 - * * 450 + * * 440 451 * This is simply a status registers that monitors the LLP error * 441 - * rate. * 442 - * * 452 + * rate. * 453 + * * 443 454 ************************************************************************/ 444 455 445 456 typedef union ii_illr_u { 446 - uint64_t ii_illr_regval; 447 - struct { 448 - uint64_t i_sn_cnt : 16; 449 - uint64_t i_cb_cnt : 16; 450 - uint64_t i_rsvd : 32; 457 + uint64_t ii_illr_regval; 458 + struct { 459 + uint64_t i_sn_cnt:16; 460 + uint64_t i_cb_cnt:16; 461 + uint64_t i_rsvd:32; 451 462 } ii_illr_fld_s; 452 463 } ii_illr_u_t; 453 464 454 - 455 465 /************************************************************************ 456 - * * 466 + * * 457 467 * Description: All II-detected non-BTE error interrupts are * 458 468 * specified via this register. * 459 469 * NOTE: The PI interrupt register address is hardcoded in the II. If * ··· 460 476 * PI_ID==1, then the II sends the interrupt request to address * 461 477 * offset 0x01A0_0090 within the local register address space of PI1 * 462 478 * on the node specified by the NODE field. 
* 463 - * * 479 + * * 464 480 ************************************************************************/ 465 481 466 482 typedef union ii_iidsr_u { 467 - uint64_t ii_iidsr_regval; 468 - struct { 469 - uint64_t i_level : 8; 470 - uint64_t i_pi_id : 1; 471 - uint64_t i_node : 11; 472 - uint64_t i_rsvd_3 : 4; 473 - uint64_t i_enable : 1; 474 - uint64_t i_rsvd_2 : 3; 475 - uint64_t i_int_sent : 2; 476 - uint64_t i_rsvd_1 : 2; 477 - uint64_t i_pi0_forward_int : 1; 478 - uint64_t i_pi1_forward_int : 1; 479 - uint64_t i_rsvd : 30; 483 + uint64_t ii_iidsr_regval; 484 + struct { 485 + uint64_t i_level:8; 486 + uint64_t i_pi_id:1; 487 + uint64_t i_node:11; 488 + uint64_t i_rsvd_3:4; 489 + uint64_t i_enable:1; 490 + uint64_t i_rsvd_2:3; 491 + uint64_t i_int_sent:2; 492 + uint64_t i_rsvd_1:2; 493 + uint64_t i_pi0_forward_int:1; 494 + uint64_t i_pi1_forward_int:1; 495 + uint64_t i_rsvd:30; 480 496 } ii_iidsr_fld_s; 481 497 } ii_iidsr_u_t; 482 498 483 - 484 - 485 499 /************************************************************************ 486 - * * 500 + * * 487 501 * There are two instances of this register. This register is used * 488 502 * for matching up the incoming responses from the graphics widget to * 489 503 * the processor that initiated the graphics operation. The * 490 504 * write-responses are converted to graphics credits and returned to * 491 505 * the processor so that the processor interface can manage the flow * 492 506 * control. 
* 493 - * * 507 + * * 494 508 ************************************************************************/ 495 509 496 510 typedef union ii_igfx0_u { 497 - uint64_t ii_igfx0_regval; 498 - struct { 499 - uint64_t i_w_num : 4; 500 - uint64_t i_pi_id : 1; 501 - uint64_t i_n_num : 12; 502 - uint64_t i_p_num : 1; 503 - uint64_t i_rsvd : 46; 511 + uint64_t ii_igfx0_regval; 512 + struct { 513 + uint64_t i_w_num:4; 514 + uint64_t i_pi_id:1; 515 + uint64_t i_n_num:12; 516 + uint64_t i_p_num:1; 517 + uint64_t i_rsvd:46; 504 518 } ii_igfx0_fld_s; 505 519 } ii_igfx0_u_t; 506 520 507 - 508 521 /************************************************************************ 509 - * * 522 + * * 510 523 * There are two instances of this register. This register is used * 511 524 * for matching up the incoming responses from the graphics widget to * 512 525 * the processor that initiated the graphics operation. The * 513 526 * write-responses are converted to graphics credits and returned to * 514 527 * the processor so that the processor interface can manage the flow * 515 528 * control. * 516 - * * 529 + * * 517 530 ************************************************************************/ 518 531 519 532 typedef union ii_igfx1_u { 520 - uint64_t ii_igfx1_regval; 521 - struct { 522 - uint64_t i_w_num : 4; 523 - uint64_t i_pi_id : 1; 524 - uint64_t i_n_num : 12; 525 - uint64_t i_p_num : 1; 526 - uint64_t i_rsvd : 46; 533 + uint64_t ii_igfx1_regval; 534 + struct { 535 + uint64_t i_w_num:4; 536 + uint64_t i_pi_id:1; 537 + uint64_t i_n_num:12; 538 + uint64_t i_p_num:1; 539 + uint64_t i_rsvd:46; 527 540 } ii_igfx1_fld_s; 528 541 } ii_igfx1_u_t; 529 542 530 - 531 543 /************************************************************************ 532 - * * 544 + * * 533 545 * There are two instances of this registers. These registers are * 534 546 * used as scratch registers for software use. 
* 535 - * * 547 + * * 536 548 ************************************************************************/ 537 549 538 550 typedef union ii_iscr0_u { 539 - uint64_t ii_iscr0_regval; 540 - struct { 541 - uint64_t i_scratch : 64; 551 + uint64_t ii_iscr0_regval; 552 + struct { 553 + uint64_t i_scratch:64; 542 554 } ii_iscr0_fld_s; 543 555 } ii_iscr0_u_t; 544 556 545 - 546 - 547 557 /************************************************************************ 548 - * * 558 + * * 549 559 * There are two instances of this registers. These registers are * 550 560 * used as scratch registers for software use. * 551 - * * 561 + * * 552 562 ************************************************************************/ 553 563 554 564 typedef union ii_iscr1_u { 555 - uint64_t ii_iscr1_regval; 556 - struct { 557 - uint64_t i_scratch : 64; 565 + uint64_t ii_iscr1_regval; 566 + struct { 567 + uint64_t i_scratch:64; 558 568 } ii_iscr1_fld_s; 559 569 } ii_iscr1_u_t; 560 570 561 - 562 571 /************************************************************************ 563 - * * 572 + * * 564 573 * Description: There are seven instances of translation table entry * 565 574 * registers. Each register maps a Shub Big Window to a 48-bit * 566 575 * address on Crosstalk. * ··· 576 599 * Crosstalk space addressable by the Shub is thus the lower * 577 600 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 578 601 * of this space can be accessed. 
* 579 - * * 602 + * * 580 603 ************************************************************************/ 581 604 582 605 typedef union ii_itte1_u { 583 - uint64_t ii_itte1_regval; 584 - struct { 585 - uint64_t i_offset : 5; 586 - uint64_t i_rsvd_1 : 3; 587 - uint64_t i_w_num : 4; 588 - uint64_t i_iosp : 1; 589 - uint64_t i_rsvd : 51; 606 + uint64_t ii_itte1_regval; 607 + struct { 608 + uint64_t i_offset:5; 609 + uint64_t i_rsvd_1:3; 610 + uint64_t i_w_num:4; 611 + uint64_t i_iosp:1; 612 + uint64_t i_rsvd:51; 590 613 } ii_itte1_fld_s; 591 614 } ii_itte1_u_t; 592 615 593 - 594 616 /************************************************************************ 595 - * * 617 + * * 596 618 * Description: There are seven instances of translation table entry * 597 619 * registers. Each register maps a Shub Big Window to a 48-bit * 598 620 * address on Crosstalk. * ··· 614 638 * Crosstalk space addressable by the Shub is thus the lower * 615 639 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 616 640 * of this space can be accessed. * 617 - * * 641 + * * 618 642 ************************************************************************/ 619 643 620 644 typedef union ii_itte2_u { 621 - uint64_t ii_itte2_regval; 622 - struct { 623 - uint64_t i_offset : 5; 624 - uint64_t i_rsvd_1 : 3; 625 - uint64_t i_w_num : 4; 626 - uint64_t i_iosp : 1; 627 - uint64_t i_rsvd : 51; 645 + uint64_t ii_itte2_regval; 646 + struct { 647 + uint64_t i_offset:5; 648 + uint64_t i_rsvd_1:3; 649 + uint64_t i_w_num:4; 650 + uint64_t i_iosp:1; 651 + uint64_t i_rsvd:51; 628 652 } ii_itte2_fld_s; 629 653 } ii_itte2_u_t; 630 654 631 - 632 655 /************************************************************************ 633 - * * 656 + * * 634 657 * Description: There are seven instances of translation table entry * 635 658 * registers. Each register maps a Shub Big Window to a 48-bit * 636 659 * address on Crosstalk. 
* ··· 652 677 * Crosstalk space addressable by the SHub is thus the lower * 653 678 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 654 679 * of this space can be accessed. * 655 - * * 680 + * * 656 681 ************************************************************************/ 657 682 658 683 typedef union ii_itte3_u { 659 - uint64_t ii_itte3_regval; 660 - struct { 661 - uint64_t i_offset : 5; 662 - uint64_t i_rsvd_1 : 3; 663 - uint64_t i_w_num : 4; 664 - uint64_t i_iosp : 1; 665 - uint64_t i_rsvd : 51; 684 + uint64_t ii_itte3_regval; 685 + struct { 686 + uint64_t i_offset:5; 687 + uint64_t i_rsvd_1:3; 688 + uint64_t i_w_num:4; 689 + uint64_t i_iosp:1; 690 + uint64_t i_rsvd:51; 666 691 } ii_itte3_fld_s; 667 692 } ii_itte3_u_t; 668 693 669 - 670 694 /************************************************************************ 671 - * * 695 + * * 672 696 * Description: There are seven instances of translation table entry * 673 697 * registers. Each register maps a SHub Big Window to a 48-bit * 674 698 * address on Crosstalk. * ··· 690 716 * Crosstalk space addressable by the SHub is thus the lower * 691 717 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 692 718 * of this space can be accessed. 
* 693 - * * 719 + * * 694 720 ************************************************************************/ 695 721 696 722 typedef union ii_itte4_u { 697 - uint64_t ii_itte4_regval; 698 - struct { 699 - uint64_t i_offset : 5; 700 - uint64_t i_rsvd_1 : 3; 701 - uint64_t i_w_num : 4; 702 - uint64_t i_iosp : 1; 703 - uint64_t i_rsvd : 51; 723 + uint64_t ii_itte4_regval; 724 + struct { 725 + uint64_t i_offset:5; 726 + uint64_t i_rsvd_1:3; 727 + uint64_t i_w_num:4; 728 + uint64_t i_iosp:1; 729 + uint64_t i_rsvd:51; 704 730 } ii_itte4_fld_s; 705 731 } ii_itte4_u_t; 706 732 707 - 708 733 /************************************************************************ 709 - * * 734 + * * 710 735 * Description: There are seven instances of translation table entry * 711 736 * registers. Each register maps a SHub Big Window to a 48-bit * 712 737 * address on Crosstalk. * ··· 728 755 * Crosstalk space addressable by the Shub is thus the lower * 729 756 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 730 757 * of this space can be accessed. * 731 - * * 758 + * * 732 759 ************************************************************************/ 733 760 734 761 typedef union ii_itte5_u { 735 - uint64_t ii_itte5_regval; 736 - struct { 737 - uint64_t i_offset : 5; 738 - uint64_t i_rsvd_1 : 3; 739 - uint64_t i_w_num : 4; 740 - uint64_t i_iosp : 1; 741 - uint64_t i_rsvd : 51; 762 + uint64_t ii_itte5_regval; 763 + struct { 764 + uint64_t i_offset:5; 765 + uint64_t i_rsvd_1:3; 766 + uint64_t i_w_num:4; 767 + uint64_t i_iosp:1; 768 + uint64_t i_rsvd:51; 742 769 } ii_itte5_fld_s; 743 770 } ii_itte5_u_t; 744 771 745 - 746 772 /************************************************************************ 747 - * * 773 + * * 748 774 * Description: There are seven instances of translation table entry * 749 775 * registers. Each register maps a Shub Big Window to a 48-bit * 750 776 * address on Crosstalk. 
* ··· 766 794 * Crosstalk space addressable by the Shub is thus the lower * 767 795 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 768 796 * of this space can be accessed. * 769 - * * 797 + * * 770 798 ************************************************************************/ 771 799 772 800 typedef union ii_itte6_u { 773 - uint64_t ii_itte6_regval; 774 - struct { 775 - uint64_t i_offset : 5; 776 - uint64_t i_rsvd_1 : 3; 777 - uint64_t i_w_num : 4; 778 - uint64_t i_iosp : 1; 779 - uint64_t i_rsvd : 51; 801 + uint64_t ii_itte6_regval; 802 + struct { 803 + uint64_t i_offset:5; 804 + uint64_t i_rsvd_1:3; 805 + uint64_t i_w_num:4; 806 + uint64_t i_iosp:1; 807 + uint64_t i_rsvd:51; 780 808 } ii_itte6_fld_s; 781 809 } ii_itte6_u_t; 782 810 783 - 784 811 /************************************************************************ 785 - * * 812 + * * 786 813 * Description: There are seven instances of translation table entry * 787 814 * registers. Each register maps a Shub Big Window to a 48-bit * 788 815 * address on Crosstalk. * ··· 804 833 * Crosstalk space addressable by the SHub is thus the lower * 805 834 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * 806 835 * of this space can be accessed. 
* 807 - * * 836 + * * 808 837 ************************************************************************/ 809 838 810 839 typedef union ii_itte7_u { 811 - uint64_t ii_itte7_regval; 812 - struct { 813 - uint64_t i_offset : 5; 814 - uint64_t i_rsvd_1 : 3; 815 - uint64_t i_w_num : 4; 816 - uint64_t i_iosp : 1; 817 - uint64_t i_rsvd : 51; 840 + uint64_t ii_itte7_regval; 841 + struct { 842 + uint64_t i_offset:5; 843 + uint64_t i_rsvd_1:3; 844 + uint64_t i_w_num:4; 845 + uint64_t i_iosp:1; 846 + uint64_t i_rsvd:51; 818 847 } ii_itte7_fld_s; 819 848 } ii_itte7_u_t; 820 849 821 - 822 850 /************************************************************************ 823 - * * 851 + * * 824 852 * Description: There are 9 instances of this register, one per * 825 853 * actual widget in this implementation of SHub and Crossbow. * 826 854 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 838 868 * register; the write will correct the C field and capture its new * 839 869 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 840 870 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 841 - * . * 842 - * * 871 + * . 
* 872 + * * 843 873 ************************************************************************/ 844 874 845 875 typedef union ii_iprb0_u { 846 - uint64_t ii_iprb0_regval; 847 - struct { 848 - uint64_t i_c : 8; 849 - uint64_t i_na : 14; 850 - uint64_t i_rsvd_2 : 2; 851 - uint64_t i_nb : 14; 852 - uint64_t i_rsvd_1 : 2; 853 - uint64_t i_m : 2; 854 - uint64_t i_f : 1; 855 - uint64_t i_of_cnt : 5; 856 - uint64_t i_error : 1; 857 - uint64_t i_rd_to : 1; 858 - uint64_t i_spur_wr : 1; 859 - uint64_t i_spur_rd : 1; 860 - uint64_t i_rsvd : 11; 861 - uint64_t i_mult_err : 1; 876 + uint64_t ii_iprb0_regval; 877 + struct { 878 + uint64_t i_c:8; 879 + uint64_t i_na:14; 880 + uint64_t i_rsvd_2:2; 881 + uint64_t i_nb:14; 882 + uint64_t i_rsvd_1:2; 883 + uint64_t i_m:2; 884 + uint64_t i_f:1; 885 + uint64_t i_of_cnt:5; 886 + uint64_t i_error:1; 887 + uint64_t i_rd_to:1; 888 + uint64_t i_spur_wr:1; 889 + uint64_t i_spur_rd:1; 890 + uint64_t i_rsvd:11; 891 + uint64_t i_mult_err:1; 862 892 } ii_iprb0_fld_s; 863 893 } ii_iprb0_u_t; 864 894 865 - 866 895 /************************************************************************ 867 - * * 896 + * * 868 897 * Description: There are 9 instances of this register, one per * 869 898 * actual widget in this implementation of SHub and Crossbow. * 870 899 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 882 913 * register; the write will correct the C field and capture its new * 883 914 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 884 915 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 885 - * . * 886 - * * 916 + * . 
* 917 + * * 887 918 ************************************************************************/ 888 919 889 920 typedef union ii_iprb8_u { 890 - uint64_t ii_iprb8_regval; 891 - struct { 892 - uint64_t i_c : 8; 893 - uint64_t i_na : 14; 894 - uint64_t i_rsvd_2 : 2; 895 - uint64_t i_nb : 14; 896 - uint64_t i_rsvd_1 : 2; 897 - uint64_t i_m : 2; 898 - uint64_t i_f : 1; 899 - uint64_t i_of_cnt : 5; 900 - uint64_t i_error : 1; 901 - uint64_t i_rd_to : 1; 902 - uint64_t i_spur_wr : 1; 903 - uint64_t i_spur_rd : 1; 904 - uint64_t i_rsvd : 11; 905 - uint64_t i_mult_err : 1; 921 + uint64_t ii_iprb8_regval; 922 + struct { 923 + uint64_t i_c:8; 924 + uint64_t i_na:14; 925 + uint64_t i_rsvd_2:2; 926 + uint64_t i_nb:14; 927 + uint64_t i_rsvd_1:2; 928 + uint64_t i_m:2; 929 + uint64_t i_f:1; 930 + uint64_t i_of_cnt:5; 931 + uint64_t i_error:1; 932 + uint64_t i_rd_to:1; 933 + uint64_t i_spur_wr:1; 934 + uint64_t i_spur_rd:1; 935 + uint64_t i_rsvd:11; 936 + uint64_t i_mult_err:1; 906 937 } ii_iprb8_fld_s; 907 938 } ii_iprb8_u_t; 908 939 909 - 910 940 /************************************************************************ 911 - * * 941 + * * 912 942 * Description: There are 9 instances of this register, one per * 913 943 * actual widget in this implementation of SHub and Crossbow. * 914 944 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 926 958 * register; the write will correct the C field and capture its new * 927 959 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 928 960 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 929 - * . * 930 - * * 961 + * . 
* 962 + * * 931 963 ************************************************************************/ 932 964 933 965 typedef union ii_iprb9_u { 934 - uint64_t ii_iprb9_regval; 935 - struct { 936 - uint64_t i_c : 8; 937 - uint64_t i_na : 14; 938 - uint64_t i_rsvd_2 : 2; 939 - uint64_t i_nb : 14; 940 - uint64_t i_rsvd_1 : 2; 941 - uint64_t i_m : 2; 942 - uint64_t i_f : 1; 943 - uint64_t i_of_cnt : 5; 944 - uint64_t i_error : 1; 945 - uint64_t i_rd_to : 1; 946 - uint64_t i_spur_wr : 1; 947 - uint64_t i_spur_rd : 1; 948 - uint64_t i_rsvd : 11; 949 - uint64_t i_mult_err : 1; 966 + uint64_t ii_iprb9_regval; 967 + struct { 968 + uint64_t i_c:8; 969 + uint64_t i_na:14; 970 + uint64_t i_rsvd_2:2; 971 + uint64_t i_nb:14; 972 + uint64_t i_rsvd_1:2; 973 + uint64_t i_m:2; 974 + uint64_t i_f:1; 975 + uint64_t i_of_cnt:5; 976 + uint64_t i_error:1; 977 + uint64_t i_rd_to:1; 978 + uint64_t i_spur_wr:1; 979 + uint64_t i_spur_rd:1; 980 + uint64_t i_rsvd:11; 981 + uint64_t i_mult_err:1; 950 982 } ii_iprb9_fld_s; 951 983 } ii_iprb9_u_t; 952 984 953 - 954 985 /************************************************************************ 955 - * * 986 + * * 956 987 * Description: There are 9 instances of this register, one per * 957 988 * actual widget in this implementation of SHub and Crossbow. * 958 989 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 970 1003 * register; the write will correct the C field and capture its new * 971 1004 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 972 1005 * SPUR_WR bit will persist if IPRBx hasn't yet been written. 
* 973 - * * 974 - * * 1006 + * * 1007 + * * 975 1008 ************************************************************************/ 976 1009 977 1010 typedef union ii_iprba_u { 978 - uint64_t ii_iprba_regval; 979 - struct { 980 - uint64_t i_c : 8; 981 - uint64_t i_na : 14; 982 - uint64_t i_rsvd_2 : 2; 983 - uint64_t i_nb : 14; 984 - uint64_t i_rsvd_1 : 2; 985 - uint64_t i_m : 2; 986 - uint64_t i_f : 1; 987 - uint64_t i_of_cnt : 5; 988 - uint64_t i_error : 1; 989 - uint64_t i_rd_to : 1; 990 - uint64_t i_spur_wr : 1; 991 - uint64_t i_spur_rd : 1; 992 - uint64_t i_rsvd : 11; 993 - uint64_t i_mult_err : 1; 1011 + uint64_t ii_iprba_regval; 1012 + struct { 1013 + uint64_t i_c:8; 1014 + uint64_t i_na:14; 1015 + uint64_t i_rsvd_2:2; 1016 + uint64_t i_nb:14; 1017 + uint64_t i_rsvd_1:2; 1018 + uint64_t i_m:2; 1019 + uint64_t i_f:1; 1020 + uint64_t i_of_cnt:5; 1021 + uint64_t i_error:1; 1022 + uint64_t i_rd_to:1; 1023 + uint64_t i_spur_wr:1; 1024 + uint64_t i_spur_rd:1; 1025 + uint64_t i_rsvd:11; 1026 + uint64_t i_mult_err:1; 994 1027 } ii_iprba_fld_s; 995 1028 } ii_iprba_u_t; 996 1029 997 - 998 1030 /************************************************************************ 999 - * * 1031 + * * 1000 1032 * Description: There are 9 instances of this register, one per * 1001 1033 * actual widget in this implementation of SHub and Crossbow. * 1002 1034 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 1014 1048 * register; the write will correct the C field and capture its new * 1015 1049 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1016 1050 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1017 - * . * 1018 - * * 1051 + * . 
* 1052 + * * 1019 1053 ************************************************************************/ 1020 1054 1021 1055 typedef union ii_iprbb_u { 1022 - uint64_t ii_iprbb_regval; 1023 - struct { 1024 - uint64_t i_c : 8; 1025 - uint64_t i_na : 14; 1026 - uint64_t i_rsvd_2 : 2; 1027 - uint64_t i_nb : 14; 1028 - uint64_t i_rsvd_1 : 2; 1029 - uint64_t i_m : 2; 1030 - uint64_t i_f : 1; 1031 - uint64_t i_of_cnt : 5; 1032 - uint64_t i_error : 1; 1033 - uint64_t i_rd_to : 1; 1034 - uint64_t i_spur_wr : 1; 1035 - uint64_t i_spur_rd : 1; 1036 - uint64_t i_rsvd : 11; 1037 - uint64_t i_mult_err : 1; 1056 + uint64_t ii_iprbb_regval; 1057 + struct { 1058 + uint64_t i_c:8; 1059 + uint64_t i_na:14; 1060 + uint64_t i_rsvd_2:2; 1061 + uint64_t i_nb:14; 1062 + uint64_t i_rsvd_1:2; 1063 + uint64_t i_m:2; 1064 + uint64_t i_f:1; 1065 + uint64_t i_of_cnt:5; 1066 + uint64_t i_error:1; 1067 + uint64_t i_rd_to:1; 1068 + uint64_t i_spur_wr:1; 1069 + uint64_t i_spur_rd:1; 1070 + uint64_t i_rsvd:11; 1071 + uint64_t i_mult_err:1; 1038 1072 } ii_iprbb_fld_s; 1039 1073 } ii_iprbb_u_t; 1040 1074 1041 - 1042 1075 /************************************************************************ 1043 - * * 1076 + * * 1044 1077 * Description: There are 9 instances of this register, one per * 1045 1078 * actual widget in this implementation of SHub and Crossbow. * 1046 1079 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 1058 1093 * register; the write will correct the C field and capture its new * 1059 1094 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1060 1095 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1061 - * . * 1062 - * * 1096 + * . 
* 1097 + * * 1063 1098 ************************************************************************/ 1064 1099 1065 1100 typedef union ii_iprbc_u { 1066 - uint64_t ii_iprbc_regval; 1067 - struct { 1068 - uint64_t i_c : 8; 1069 - uint64_t i_na : 14; 1070 - uint64_t i_rsvd_2 : 2; 1071 - uint64_t i_nb : 14; 1072 - uint64_t i_rsvd_1 : 2; 1073 - uint64_t i_m : 2; 1074 - uint64_t i_f : 1; 1075 - uint64_t i_of_cnt : 5; 1076 - uint64_t i_error : 1; 1077 - uint64_t i_rd_to : 1; 1078 - uint64_t i_spur_wr : 1; 1079 - uint64_t i_spur_rd : 1; 1080 - uint64_t i_rsvd : 11; 1081 - uint64_t i_mult_err : 1; 1101 + uint64_t ii_iprbc_regval; 1102 + struct { 1103 + uint64_t i_c:8; 1104 + uint64_t i_na:14; 1105 + uint64_t i_rsvd_2:2; 1106 + uint64_t i_nb:14; 1107 + uint64_t i_rsvd_1:2; 1108 + uint64_t i_m:2; 1109 + uint64_t i_f:1; 1110 + uint64_t i_of_cnt:5; 1111 + uint64_t i_error:1; 1112 + uint64_t i_rd_to:1; 1113 + uint64_t i_spur_wr:1; 1114 + uint64_t i_spur_rd:1; 1115 + uint64_t i_rsvd:11; 1116 + uint64_t i_mult_err:1; 1082 1117 } ii_iprbc_fld_s; 1083 1118 } ii_iprbc_u_t; 1084 1119 1085 - 1086 1120 /************************************************************************ 1087 - * * 1121 + * * 1088 1122 * Description: There are 9 instances of this register, one per * 1089 1123 * actual widget in this implementation of SHub and Crossbow. * 1090 1124 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 1102 1138 * register; the write will correct the C field and capture its new * 1103 1139 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1104 1140 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1105 - * . * 1106 - * * 1141 + * . 
* 1142 + * * 1107 1143 ************************************************************************/ 1108 1144 1109 1145 typedef union ii_iprbd_u { 1110 - uint64_t ii_iprbd_regval; 1111 - struct { 1112 - uint64_t i_c : 8; 1113 - uint64_t i_na : 14; 1114 - uint64_t i_rsvd_2 : 2; 1115 - uint64_t i_nb : 14; 1116 - uint64_t i_rsvd_1 : 2; 1117 - uint64_t i_m : 2; 1118 - uint64_t i_f : 1; 1119 - uint64_t i_of_cnt : 5; 1120 - uint64_t i_error : 1; 1121 - uint64_t i_rd_to : 1; 1122 - uint64_t i_spur_wr : 1; 1123 - uint64_t i_spur_rd : 1; 1124 - uint64_t i_rsvd : 11; 1125 - uint64_t i_mult_err : 1; 1146 + uint64_t ii_iprbd_regval; 1147 + struct { 1148 + uint64_t i_c:8; 1149 + uint64_t i_na:14; 1150 + uint64_t i_rsvd_2:2; 1151 + uint64_t i_nb:14; 1152 + uint64_t i_rsvd_1:2; 1153 + uint64_t i_m:2; 1154 + uint64_t i_f:1; 1155 + uint64_t i_of_cnt:5; 1156 + uint64_t i_error:1; 1157 + uint64_t i_rd_to:1; 1158 + uint64_t i_spur_wr:1; 1159 + uint64_t i_spur_rd:1; 1160 + uint64_t i_rsvd:11; 1161 + uint64_t i_mult_err:1; 1126 1162 } ii_iprbd_fld_s; 1127 1163 } ii_iprbd_u_t; 1128 1164 1129 - 1130 1165 /************************************************************************ 1131 - * * 1166 + * * 1132 1167 * Description: There are 9 instances of this register, one per * 1133 1168 * actual widget in this implementation of SHub and Crossbow. * 1134 1169 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 1146 1183 * register; the write will correct the C field and capture its new * 1147 1184 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1148 1185 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1149 - * . * 1150 - * * 1186 + * . 
* 1187 + * * 1151 1188 ************************************************************************/ 1152 1189 1153 1190 typedef union ii_iprbe_u { 1154 - uint64_t ii_iprbe_regval; 1155 - struct { 1156 - uint64_t i_c : 8; 1157 - uint64_t i_na : 14; 1158 - uint64_t i_rsvd_2 : 2; 1159 - uint64_t i_nb : 14; 1160 - uint64_t i_rsvd_1 : 2; 1161 - uint64_t i_m : 2; 1162 - uint64_t i_f : 1; 1163 - uint64_t i_of_cnt : 5; 1164 - uint64_t i_error : 1; 1165 - uint64_t i_rd_to : 1; 1166 - uint64_t i_spur_wr : 1; 1167 - uint64_t i_spur_rd : 1; 1168 - uint64_t i_rsvd : 11; 1169 - uint64_t i_mult_err : 1; 1191 + uint64_t ii_iprbe_regval; 1192 + struct { 1193 + uint64_t i_c:8; 1194 + uint64_t i_na:14; 1195 + uint64_t i_rsvd_2:2; 1196 + uint64_t i_nb:14; 1197 + uint64_t i_rsvd_1:2; 1198 + uint64_t i_m:2; 1199 + uint64_t i_f:1; 1200 + uint64_t i_of_cnt:5; 1201 + uint64_t i_error:1; 1202 + uint64_t i_rd_to:1; 1203 + uint64_t i_spur_wr:1; 1204 + uint64_t i_spur_rd:1; 1205 + uint64_t i_rsvd:11; 1206 + uint64_t i_mult_err:1; 1170 1207 } ii_iprbe_fld_s; 1171 1208 } ii_iprbe_u_t; 1172 1209 1173 - 1174 1210 /************************************************************************ 1175 - * * 1211 + * * 1176 1212 * Description: There are 9 instances of this register, one per * 1177 1213 * actual widget in this implementation of Shub and Crossbow. * 1178 1214 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * ··· 1190 1228 * register; the write will correct the C field and capture its new * 1191 1229 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1192 1230 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1193 - * . * 1194 - * * 1231 + * . 
* 1232 + * * 1195 1233 ************************************************************************/ 1196 1234 1197 1235 typedef union ii_iprbf_u { 1198 - uint64_t ii_iprbf_regval; 1199 - struct { 1200 - uint64_t i_c : 8; 1201 - uint64_t i_na : 14; 1202 - uint64_t i_rsvd_2 : 2; 1203 - uint64_t i_nb : 14; 1204 - uint64_t i_rsvd_1 : 2; 1205 - uint64_t i_m : 2; 1206 - uint64_t i_f : 1; 1207 - uint64_t i_of_cnt : 5; 1208 - uint64_t i_error : 1; 1209 - uint64_t i_rd_to : 1; 1210 - uint64_t i_spur_wr : 1; 1211 - uint64_t i_spur_rd : 1; 1212 - uint64_t i_rsvd : 11; 1213 - uint64_t i_mult_err : 1; 1214 - } ii_iprbe_fld_s; 1236 + uint64_t ii_iprbf_regval; 1237 + struct { 1238 + uint64_t i_c:8; 1239 + uint64_t i_na:14; 1240 + uint64_t i_rsvd_2:2; 1241 + uint64_t i_nb:14; 1242 + uint64_t i_rsvd_1:2; 1243 + uint64_t i_m:2; 1244 + uint64_t i_f:1; 1245 + uint64_t i_of_cnt:5; 1246 + uint64_t i_error:1; 1247 + uint64_t i_rd_to:1; 1248 + uint64_t i_spur_wr:1; 1249 + uint64_t i_spur_rd:1; 1250 + uint64_t i_rsvd:11; 1251 + uint64_t i_mult_err:1; 1252 + } ii_iprbe_fld_s; 1215 1253 } ii_iprbf_u_t; 1216 1254 1217 - 1218 1255 /************************************************************************ 1219 - * * 1256 + * * 1220 1257 * This register specifies the timeout value to use for monitoring * 1221 1258 * Crosstalk credits which are used outbound to Crosstalk. An * 1222 1259 * internal counter called the Crosstalk Credit Timeout Counter * ··· 1228 1267 * Crosstalk Credit Timeout has occurred. The internal counter is not * 1229 1268 * readable from software, and stops counting at its maximum value, * 1230 1269 * so it cannot cause more than one interrupt. 
* 1231 - * * 1270 + * * 1232 1271 ************************************************************************/ 1233 1272 1234 1273 typedef union ii_ixcc_u { 1235 - uint64_t ii_ixcc_regval; 1236 - struct { 1237 - uint64_t i_time_out : 26; 1238 - uint64_t i_rsvd : 38; 1274 + uint64_t ii_ixcc_regval; 1275 + struct { 1276 + uint64_t i_time_out:26; 1277 + uint64_t i_rsvd:38; 1239 1278 } ii_ixcc_fld_s; 1240 1279 } ii_ixcc_u_t; 1241 1280 1242 - 1243 1281 /************************************************************************ 1244 - * * 1282 + * * 1245 1283 * Description: This register qualifies all the PIO and DMA * 1246 1284 * operations launched from widget 0 towards the SHub. In * 1247 1285 * addition, it also qualifies accesses by the BTE streams. * ··· 1252 1292 * the Wx_IAC field. The bits in this field are set by writing a 1 to * 1253 1293 * them. Incoming replies from Crosstalk are not subject to this * 1254 1294 * access control mechanism. * 1255 - * * 1295 + * * 1256 1296 ************************************************************************/ 1257 1297 1258 1298 typedef union ii_imem_u { 1259 - uint64_t ii_imem_regval; 1260 - struct { 1261 - uint64_t i_w0_esd : 1; 1262 - uint64_t i_rsvd_3 : 3; 1263 - uint64_t i_b0_esd : 1; 1264 - uint64_t i_rsvd_2 : 3; 1265 - uint64_t i_b1_esd : 1; 1266 - uint64_t i_rsvd_1 : 3; 1267 - uint64_t i_clr_precise : 1; 1268 - uint64_t i_rsvd : 51; 1299 + uint64_t ii_imem_regval; 1300 + struct { 1301 + uint64_t i_w0_esd:1; 1302 + uint64_t i_rsvd_3:3; 1303 + uint64_t i_b0_esd:1; 1304 + uint64_t i_rsvd_2:3; 1305 + uint64_t i_b1_esd:1; 1306 + uint64_t i_rsvd_1:3; 1307 + uint64_t i_clr_precise:1; 1308 + uint64_t i_rsvd:51; 1269 1309 } ii_imem_fld_s; 1270 1310 } ii_imem_u_t; 1271 1311 1272 - 1273 - 1274 1312 /************************************************************************ 1275 - * * 1313 + * * 1276 1314 * Description: This register specifies the timeout value to use for * 1277 1315 * monitoring Crosstalk tail flits coming into the 
Shub in the * 1278 1316 * TAIL_TO field. An internal counter associated with this register * ··· 1290 1332 * the value in the RRSP_TO field, a Read Response Timeout has * 1291 1333 * occurred, and error handling occurs as described in the Error * 1292 1334 * Handling section of this document. * 1293 - * * 1335 + * * 1294 1336 ************************************************************************/ 1295 1337 1296 1338 typedef union ii_ixtt_u { 1297 - uint64_t ii_ixtt_regval; 1298 - struct { 1299 - uint64_t i_tail_to : 26; 1300 - uint64_t i_rsvd_1 : 6; 1301 - uint64_t i_rrsp_ps : 23; 1302 - uint64_t i_rrsp_to : 5; 1303 - uint64_t i_rsvd : 4; 1339 + uint64_t ii_ixtt_regval; 1340 + struct { 1341 + uint64_t i_tail_to:26; 1342 + uint64_t i_rsvd_1:6; 1343 + uint64_t i_rrsp_ps:23; 1344 + uint64_t i_rrsp_to:5; 1345 + uint64_t i_rsvd:4; 1304 1346 } ii_ixtt_fld_s; 1305 1347 } ii_ixtt_u_t; 1306 1348 1307 - 1308 1349 /************************************************************************ 1309 - * * 1350 + * * 1310 1351 * Writing a 1 to the fields of this register clears the appropriate * 1311 1352 * error bits in other areas of SHub. Note that when the * 1312 1353 * E_PRB_x bits are used to clear error bits in PRB registers, * 1313 1354 * SPUR_RD and SPUR_WR may persist, because they require additional * 1314 1355 * action to clear them. See the IPRBx and IXSS Register * 1315 1356 * specifications. 
* 1316 - * * 1357 + * * 1317 1358 ************************************************************************/ 1318 1359 1319 1360 typedef union ii_ieclr_u { 1320 - uint64_t ii_ieclr_regval; 1321 - struct { 1322 - uint64_t i_e_prb_0 : 1; 1323 - uint64_t i_rsvd : 7; 1324 - uint64_t i_e_prb_8 : 1; 1325 - uint64_t i_e_prb_9 : 1; 1326 - uint64_t i_e_prb_a : 1; 1327 - uint64_t i_e_prb_b : 1; 1328 - uint64_t i_e_prb_c : 1; 1329 - uint64_t i_e_prb_d : 1; 1330 - uint64_t i_e_prb_e : 1; 1331 - uint64_t i_e_prb_f : 1; 1332 - uint64_t i_e_crazy : 1; 1333 - uint64_t i_e_bte_0 : 1; 1334 - uint64_t i_e_bte_1 : 1; 1335 - uint64_t i_reserved_1 : 10; 1336 - uint64_t i_spur_rd_hdr : 1; 1337 - uint64_t i_cam_intr_to : 1; 1338 - uint64_t i_cam_overflow : 1; 1339 - uint64_t i_cam_read_miss : 1; 1340 - uint64_t i_ioq_rep_underflow : 1; 1341 - uint64_t i_ioq_req_underflow : 1; 1342 - uint64_t i_ioq_rep_overflow : 1; 1343 - uint64_t i_ioq_req_overflow : 1; 1344 - uint64_t i_iiq_rep_overflow : 1; 1345 - uint64_t i_iiq_req_overflow : 1; 1346 - uint64_t i_ii_xn_rep_cred_overflow : 1; 1347 - uint64_t i_ii_xn_req_cred_overflow : 1; 1348 - uint64_t i_ii_xn_invalid_cmd : 1; 1349 - uint64_t i_xn_ii_invalid_cmd : 1; 1350 - uint64_t i_reserved_2 : 21; 1361 + uint64_t ii_ieclr_regval; 1362 + struct { 1363 + uint64_t i_e_prb_0:1; 1364 + uint64_t i_rsvd:7; 1365 + uint64_t i_e_prb_8:1; 1366 + uint64_t i_e_prb_9:1; 1367 + uint64_t i_e_prb_a:1; 1368 + uint64_t i_e_prb_b:1; 1369 + uint64_t i_e_prb_c:1; 1370 + uint64_t i_e_prb_d:1; 1371 + uint64_t i_e_prb_e:1; 1372 + uint64_t i_e_prb_f:1; 1373 + uint64_t i_e_crazy:1; 1374 + uint64_t i_e_bte_0:1; 1375 + uint64_t i_e_bte_1:1; 1376 + uint64_t i_reserved_1:10; 1377 + uint64_t i_spur_rd_hdr:1; 1378 + uint64_t i_cam_intr_to:1; 1379 + uint64_t i_cam_overflow:1; 1380 + uint64_t i_cam_read_miss:1; 1381 + uint64_t i_ioq_rep_underflow:1; 1382 + uint64_t i_ioq_req_underflow:1; 1383 + uint64_t i_ioq_rep_overflow:1; 1384 + uint64_t i_ioq_req_overflow:1; 1385 + uint64_t 
i_iiq_rep_overflow:1; 1386 + uint64_t i_iiq_req_overflow:1; 1387 + uint64_t i_ii_xn_rep_cred_overflow:1; 1388 + uint64_t i_ii_xn_req_cred_overflow:1; 1389 + uint64_t i_ii_xn_invalid_cmd:1; 1390 + uint64_t i_xn_ii_invalid_cmd:1; 1391 + uint64_t i_reserved_2:21; 1351 1392 } ii_ieclr_fld_s; 1352 1393 } ii_ieclr_u_t; 1353 1394 1354 - 1355 1395 /************************************************************************ 1356 - * * 1396 + * * 1357 1397 * This register controls both BTEs. SOFT_RESET is intended for * 1358 1398 * recovery after an error. COUNT controls the total number of CRBs * 1359 1399 * that both BTEs (combined) can use, which affects total BTE * 1360 1400 * bandwidth. * 1361 - * * 1401 + * * 1362 1402 ************************************************************************/ 1363 1403 1364 1404 typedef union ii_ibcr_u { 1365 - uint64_t ii_ibcr_regval; 1366 - struct { 1367 - uint64_t i_count : 4; 1368 - uint64_t i_rsvd_1 : 4; 1369 - uint64_t i_soft_reset : 1; 1370 - uint64_t i_rsvd : 55; 1405 + uint64_t ii_ibcr_regval; 1406 + struct { 1407 + uint64_t i_count:4; 1408 + uint64_t i_rsvd_1:4; 1409 + uint64_t i_soft_reset:1; 1410 + uint64_t i_rsvd:55; 1371 1411 } ii_ibcr_fld_s; 1372 1412 } ii_ibcr_u_t; 1373 1413 1374 - 1375 1414 /************************************************************************ 1376 - * * 1415 + * * 1377 1416 * This register contains the header of a spurious read response * 1378 1417 * received from Crosstalk. A spurious read response is defined as a * 1379 1418 * read response received by II from a widget for which (1) the SIDN * ··· 1395 1440 * will be set. Any SPUR_RD bits in any other PRB registers indicate * 1396 1441 * spurious messages from other widets which were detected after the * 1397 1442 * header was captured.. 
* 1398 - * * 1443 + * * 1399 1444 ************************************************************************/ 1400 1445 1401 1446 typedef union ii_ixsm_u { 1402 - uint64_t ii_ixsm_regval; 1403 - struct { 1404 - uint64_t i_byte_en : 32; 1405 - uint64_t i_reserved : 1; 1406 - uint64_t i_tag : 3; 1407 - uint64_t i_alt_pactyp : 4; 1408 - uint64_t i_bo : 1; 1409 - uint64_t i_error : 1; 1410 - uint64_t i_vbpm : 1; 1411 - uint64_t i_gbr : 1; 1412 - uint64_t i_ds : 2; 1413 - uint64_t i_ct : 1; 1414 - uint64_t i_tnum : 5; 1415 - uint64_t i_pactyp : 4; 1416 - uint64_t i_sidn : 4; 1417 - uint64_t i_didn : 4; 1447 + uint64_t ii_ixsm_regval; 1448 + struct { 1449 + uint64_t i_byte_en:32; 1450 + uint64_t i_reserved:1; 1451 + uint64_t i_tag:3; 1452 + uint64_t i_alt_pactyp:4; 1453 + uint64_t i_bo:1; 1454 + uint64_t i_error:1; 1455 + uint64_t i_vbpm:1; 1456 + uint64_t i_gbr:1; 1457 + uint64_t i_ds:2; 1458 + uint64_t i_ct:1; 1459 + uint64_t i_tnum:5; 1460 + uint64_t i_pactyp:4; 1461 + uint64_t i_sidn:4; 1462 + uint64_t i_didn:4; 1418 1463 } ii_ixsm_fld_s; 1419 1464 } ii_ixsm_u_t; 1420 1465 1421 - 1422 1466 /************************************************************************ 1423 - * * 1467 + * * 1424 1468 * This register contains the sideband bits of a spurious read * 1425 1469 * response received from Crosstalk. * 1426 - * * 1470 + * * 1427 1471 ************************************************************************/ 1428 1472 1429 1473 typedef union ii_ixss_u { 1430 - uint64_t ii_ixss_regval; 1431 - struct { 1432 - uint64_t i_sideband : 8; 1433 - uint64_t i_rsvd : 55; 1434 - uint64_t i_valid : 1; 1474 + uint64_t ii_ixss_regval; 1475 + struct { 1476 + uint64_t i_sideband:8; 1477 + uint64_t i_rsvd:55; 1478 + uint64_t i_valid:1; 1435 1479 } ii_ixss_fld_s; 1436 1480 } ii_ixss_u_t; 1437 1481 1438 - 1439 1482 /************************************************************************ 1440 - * * 1483 + * * 1441 1484 * This register enables software to access the II LLP's test port. 
* 1442 1485 * Refer to the LLP 2.5 documentation for an explanation of the test * 1443 1486 * port. Software can write to this register to program the values * ··· 1443 1490 * TestMask and TestSeed). Similarly, software can read from this * 1444 1491 * register to obtain the values of the test port's status outputs * 1445 1492 * (TestCBerr, TestValid and TestData). * 1446 - * * 1493 + * * 1447 1494 ************************************************************************/ 1448 1495 1449 1496 typedef union ii_ilct_u { 1450 - uint64_t ii_ilct_regval; 1451 - struct { 1452 - uint64_t i_test_seed : 20; 1453 - uint64_t i_test_mask : 8; 1454 - uint64_t i_test_data : 20; 1455 - uint64_t i_test_valid : 1; 1456 - uint64_t i_test_cberr : 1; 1457 - uint64_t i_test_flit : 3; 1458 - uint64_t i_test_clear : 1; 1459 - uint64_t i_test_err_capture : 1; 1460 - uint64_t i_rsvd : 9; 1497 + uint64_t ii_ilct_regval; 1498 + struct { 1499 + uint64_t i_test_seed:20; 1500 + uint64_t i_test_mask:8; 1501 + uint64_t i_test_data:20; 1502 + uint64_t i_test_valid:1; 1503 + uint64_t i_test_cberr:1; 1504 + uint64_t i_test_flit:3; 1505 + uint64_t i_test_clear:1; 1506 + uint64_t i_test_err_capture:1; 1507 + uint64_t i_rsvd:9; 1461 1508 } ii_ilct_fld_s; 1462 1509 } ii_ilct_u_t; 1463 1510 1464 - 1465 1511 /************************************************************************ 1466 - * * 1512 + * * 1467 1513 * If the II detects an illegal incoming Duplonet packet (request or * 1468 1514 * reply) when VALID==0 in the IIEPH1 register, then it saves the * 1469 1515 * contents of the packet's header flit in the IIEPH1 and IIEPH2 * ··· 1478 1526 * packet when VALID==1 in the IIEPH1 register, then it merely sets * 1479 1527 * the OVERRUN bit to indicate that a subsequent error has happened, * 1480 1528 * and does nothing further. 
* 1481 - * * 1529 + * * 1482 1530 ************************************************************************/ 1483 1531 1484 1532 typedef union ii_iieph1_u { 1485 - uint64_t ii_iieph1_regval; 1486 - struct { 1487 - uint64_t i_command : 7; 1488 - uint64_t i_rsvd_5 : 1; 1489 - uint64_t i_suppl : 14; 1490 - uint64_t i_rsvd_4 : 1; 1491 - uint64_t i_source : 14; 1492 - uint64_t i_rsvd_3 : 1; 1493 - uint64_t i_err_type : 4; 1494 - uint64_t i_rsvd_2 : 4; 1495 - uint64_t i_overrun : 1; 1496 - uint64_t i_rsvd_1 : 3; 1497 - uint64_t i_valid : 1; 1498 - uint64_t i_rsvd : 13; 1533 + uint64_t ii_iieph1_regval; 1534 + struct { 1535 + uint64_t i_command:7; 1536 + uint64_t i_rsvd_5:1; 1537 + uint64_t i_suppl:14; 1538 + uint64_t i_rsvd_4:1; 1539 + uint64_t i_source:14; 1540 + uint64_t i_rsvd_3:1; 1541 + uint64_t i_err_type:4; 1542 + uint64_t i_rsvd_2:4; 1543 + uint64_t i_overrun:1; 1544 + uint64_t i_rsvd_1:3; 1545 + uint64_t i_valid:1; 1546 + uint64_t i_rsvd:13; 1499 1547 } ii_iieph1_fld_s; 1500 1548 } ii_iieph1_u_t; 1501 1549 1502 - 1503 1550 /************************************************************************ 1504 - * * 1551 + * * 1505 1552 * This register holds the Address field from the header flit of an * 1506 1553 * incoming erroneous Duplonet packet, along with the tail bit which * 1507 1554 * accompanied this header flit. This register is essentially an * 1508 1555 * extension of IIEPH1. Two registers were necessary because the 64 * 1509 1556 * bits available in only a single register were insufficient to * 1510 1557 * capture the entire header flit of an erroneous packet. 
* 1511 - * * 1558 + * * 1512 1559 ************************************************************************/ 1513 1560 1514 1561 typedef union ii_iieph2_u { 1515 - uint64_t ii_iieph2_regval; 1516 - struct { 1517 - uint64_t i_rsvd_0 : 3; 1518 - uint64_t i_address : 47; 1519 - uint64_t i_rsvd_1 : 10; 1520 - uint64_t i_tail : 1; 1521 - uint64_t i_rsvd : 3; 1562 + uint64_t ii_iieph2_regval; 1563 + struct { 1564 + uint64_t i_rsvd_0:3; 1565 + uint64_t i_address:47; 1566 + uint64_t i_rsvd_1:10; 1567 + uint64_t i_tail:1; 1568 + uint64_t i_rsvd:3; 1522 1569 } ii_iieph2_fld_s; 1523 1570 } ii_iieph2_u_t; 1524 1571 1525 - 1526 1572 /******************************/ 1527 1573 1528 - 1529 - 1530 1574 /************************************************************************ 1531 - * * 1575 + * * 1532 1576 * This register's value is a bit vector that guards access from SXBs * 1533 1577 * to local registers within the II as well as to external Crosstalk * 1534 1578 * widgets * 1535 - * * 1579 + * * 1536 1580 ************************************************************************/ 1537 1581 1538 1582 typedef union ii_islapr_u { 1539 - uint64_t ii_islapr_regval; 1540 - struct { 1541 - uint64_t i_region : 64; 1583 + uint64_t ii_islapr_regval; 1584 + struct { 1585 + uint64_t i_region:64; 1542 1586 } ii_islapr_fld_s; 1543 1587 } ii_islapr_u_t; 1544 1588 1545 - 1546 1589 /************************************************************************ 1547 - * * 1590 + * * 1548 1591 * A write to this register of the 56-bit value "Pup+Bun" will cause * 1549 1592 * the bit in the ISLAPR register corresponding to the region of the * 1550 1593 * requestor to be set (access allowed). 
( 1551 - * * 1594 + * * 1552 1595 ************************************************************************/ 1553 1596 1554 1597 typedef union ii_islapo_u { 1555 - uint64_t ii_islapo_regval; 1556 - struct { 1557 - uint64_t i_io_sbx_ovrride : 56; 1558 - uint64_t i_rsvd : 8; 1598 + uint64_t ii_islapo_regval; 1599 + struct { 1600 + uint64_t i_io_sbx_ovrride:56; 1601 + uint64_t i_rsvd:8; 1559 1602 } ii_islapo_fld_s; 1560 1603 } ii_islapo_u_t; 1561 1604 1562 1605 /************************************************************************ 1563 - * * 1606 + * * 1564 1607 * Determines how long the wrapper will wait aftr an interrupt is * 1565 1608 * initially issued from the II before it times out the outstanding * 1566 1609 * interrupt and drops it from the interrupt queue. * 1567 - * * 1610 + * * 1568 1611 ************************************************************************/ 1569 1612 1570 1613 typedef union ii_iwi_u { 1571 - uint64_t ii_iwi_regval; 1572 - struct { 1573 - uint64_t i_prescale : 24; 1574 - uint64_t i_rsvd : 8; 1575 - uint64_t i_timeout : 8; 1576 - uint64_t i_rsvd1 : 8; 1577 - uint64_t i_intrpt_retry_period : 8; 1578 - uint64_t i_rsvd2 : 8; 1614 + uint64_t ii_iwi_regval; 1615 + struct { 1616 + uint64_t i_prescale:24; 1617 + uint64_t i_rsvd:8; 1618 + uint64_t i_timeout:8; 1619 + uint64_t i_rsvd1:8; 1620 + uint64_t i_intrpt_retry_period:8; 1621 + uint64_t i_rsvd2:8; 1579 1622 } ii_iwi_fld_s; 1580 1623 } ii_iwi_u_t; 1581 1624 1582 1625 /************************************************************************ 1583 - * * 1626 + * * 1584 1627 * Log errors which have occurred in the II wrapper. The errors are * 1585 1628 * cleared by writing to the IECLR register. 
* 1586 - * * 1629 + * * 1587 1630 ************************************************************************/ 1588 1631 1589 1632 typedef union ii_iwel_u { 1590 - uint64_t ii_iwel_regval; 1591 - struct { 1592 - uint64_t i_intr_timed_out : 1; 1593 - uint64_t i_rsvd : 7; 1594 - uint64_t i_cam_overflow : 1; 1595 - uint64_t i_cam_read_miss : 1; 1596 - uint64_t i_rsvd1 : 2; 1597 - uint64_t i_ioq_rep_underflow : 1; 1598 - uint64_t i_ioq_req_underflow : 1; 1599 - uint64_t i_ioq_rep_overflow : 1; 1600 - uint64_t i_ioq_req_overflow : 1; 1601 - uint64_t i_iiq_rep_overflow : 1; 1602 - uint64_t i_iiq_req_overflow : 1; 1603 - uint64_t i_rsvd2 : 6; 1604 - uint64_t i_ii_xn_rep_cred_over_under: 1; 1605 - uint64_t i_ii_xn_req_cred_over_under: 1; 1606 - uint64_t i_rsvd3 : 6; 1607 - uint64_t i_ii_xn_invalid_cmd : 1; 1608 - uint64_t i_xn_ii_invalid_cmd : 1; 1609 - uint64_t i_rsvd4 : 30; 1633 + uint64_t ii_iwel_regval; 1634 + struct { 1635 + uint64_t i_intr_timed_out:1; 1636 + uint64_t i_rsvd:7; 1637 + uint64_t i_cam_overflow:1; 1638 + uint64_t i_cam_read_miss:1; 1639 + uint64_t i_rsvd1:2; 1640 + uint64_t i_ioq_rep_underflow:1; 1641 + uint64_t i_ioq_req_underflow:1; 1642 + uint64_t i_ioq_rep_overflow:1; 1643 + uint64_t i_ioq_req_overflow:1; 1644 + uint64_t i_iiq_rep_overflow:1; 1645 + uint64_t i_iiq_req_overflow:1; 1646 + uint64_t i_rsvd2:6; 1647 + uint64_t i_ii_xn_rep_cred_over_under:1; 1648 + uint64_t i_ii_xn_req_cred_over_under:1; 1649 + uint64_t i_rsvd3:6; 1650 + uint64_t i_ii_xn_invalid_cmd:1; 1651 + uint64_t i_xn_ii_invalid_cmd:1; 1652 + uint64_t i_rsvd4:30; 1610 1653 } ii_iwel_fld_s; 1611 1654 } ii_iwel_u_t; 1612 1655 1613 1656 /************************************************************************ 1614 - * * 1657 + * * 1615 1658 * Controls the II wrapper. 
* 1616 - * * 1659 + * * 1617 1660 ************************************************************************/ 1618 1661 1619 1662 typedef union ii_iwc_u { 1620 - uint64_t ii_iwc_regval; 1621 - struct { 1622 - uint64_t i_dma_byte_swap : 1; 1623 - uint64_t i_rsvd : 3; 1624 - uint64_t i_cam_read_lines_reset : 1; 1625 - uint64_t i_rsvd1 : 3; 1626 - uint64_t i_ii_xn_cred_over_under_log: 1; 1627 - uint64_t i_rsvd2 : 19; 1628 - uint64_t i_xn_rep_iq_depth : 5; 1629 - uint64_t i_rsvd3 : 3; 1630 - uint64_t i_xn_req_iq_depth : 5; 1631 - uint64_t i_rsvd4 : 3; 1632 - uint64_t i_iiq_depth : 6; 1633 - uint64_t i_rsvd5 : 12; 1634 - uint64_t i_force_rep_cred : 1; 1635 - uint64_t i_force_req_cred : 1; 1663 + uint64_t ii_iwc_regval; 1664 + struct { 1665 + uint64_t i_dma_byte_swap:1; 1666 + uint64_t i_rsvd:3; 1667 + uint64_t i_cam_read_lines_reset:1; 1668 + uint64_t i_rsvd1:3; 1669 + uint64_t i_ii_xn_cred_over_under_log:1; 1670 + uint64_t i_rsvd2:19; 1671 + uint64_t i_xn_rep_iq_depth:5; 1672 + uint64_t i_rsvd3:3; 1673 + uint64_t i_xn_req_iq_depth:5; 1674 + uint64_t i_rsvd4:3; 1675 + uint64_t i_iiq_depth:6; 1676 + uint64_t i_rsvd5:12; 1677 + uint64_t i_force_rep_cred:1; 1678 + uint64_t i_force_req_cred:1; 1636 1679 } ii_iwc_fld_s; 1637 1680 } ii_iwc_u_t; 1638 1681 1639 1682 /************************************************************************ 1640 - * * 1683 + * * 1641 1684 * Status in the II wrapper. 
* 1642 - * * 1685 + * * 1643 1686 ************************************************************************/ 1644 1687 1645 1688 typedef union ii_iws_u { 1646 - uint64_t ii_iws_regval; 1647 - struct { 1648 - uint64_t i_xn_rep_iq_credits : 5; 1649 - uint64_t i_rsvd : 3; 1650 - uint64_t i_xn_req_iq_credits : 5; 1651 - uint64_t i_rsvd1 : 51; 1689 + uint64_t ii_iws_regval; 1690 + struct { 1691 + uint64_t i_xn_rep_iq_credits:5; 1692 + uint64_t i_rsvd:3; 1693 + uint64_t i_xn_req_iq_credits:5; 1694 + uint64_t i_rsvd1:51; 1652 1695 } ii_iws_fld_s; 1653 1696 } ii_iws_u_t; 1654 1697 1655 1698 /************************************************************************ 1656 - * * 1699 + * * 1657 1700 * Masks errors in the IWEL register. * 1658 - * * 1701 + * * 1659 1702 ************************************************************************/ 1660 1703 1661 1704 typedef union ii_iweim_u { 1662 - uint64_t ii_iweim_regval; 1663 - struct { 1664 - uint64_t i_intr_timed_out : 1; 1665 - uint64_t i_rsvd : 7; 1666 - uint64_t i_cam_overflow : 1; 1667 - uint64_t i_cam_read_miss : 1; 1668 - uint64_t i_rsvd1 : 2; 1669 - uint64_t i_ioq_rep_underflow : 1; 1670 - uint64_t i_ioq_req_underflow : 1; 1671 - uint64_t i_ioq_rep_overflow : 1; 1672 - uint64_t i_ioq_req_overflow : 1; 1673 - uint64_t i_iiq_rep_overflow : 1; 1674 - uint64_t i_iiq_req_overflow : 1; 1675 - uint64_t i_rsvd2 : 6; 1676 - uint64_t i_ii_xn_rep_cred_overflow : 1; 1677 - uint64_t i_ii_xn_req_cred_overflow : 1; 1678 - uint64_t i_rsvd3 : 6; 1679 - uint64_t i_ii_xn_invalid_cmd : 1; 1680 - uint64_t i_xn_ii_invalid_cmd : 1; 1681 - uint64_t i_rsvd4 : 30; 1705 + uint64_t ii_iweim_regval; 1706 + struct { 1707 + uint64_t i_intr_timed_out:1; 1708 + uint64_t i_rsvd:7; 1709 + uint64_t i_cam_overflow:1; 1710 + uint64_t i_cam_read_miss:1; 1711 + uint64_t i_rsvd1:2; 1712 + uint64_t i_ioq_rep_underflow:1; 1713 + uint64_t i_ioq_req_underflow:1; 1714 + uint64_t i_ioq_rep_overflow:1; 1715 + uint64_t i_ioq_req_overflow:1; 1716 + uint64_t 
i_iiq_rep_overflow:1; 1717 + uint64_t i_iiq_req_overflow:1; 1718 + uint64_t i_rsvd2:6; 1719 + uint64_t i_ii_xn_rep_cred_overflow:1; 1720 + uint64_t i_ii_xn_req_cred_overflow:1; 1721 + uint64_t i_rsvd3:6; 1722 + uint64_t i_ii_xn_invalid_cmd:1; 1723 + uint64_t i_xn_ii_invalid_cmd:1; 1724 + uint64_t i_rsvd4:30; 1682 1725 } ii_iweim_fld_s; 1683 1726 } ii_iweim_u_t; 1684 1727 1685 - 1686 1728 /************************************************************************ 1687 - * * 1729 + * * 1688 1730 * A write to this register causes a particular field in the * 1689 1731 * corresponding widget's PRB entry to be adjusted up or down by 1. * 1690 1732 * This counter should be used when recovering from error and reset * 1691 1733 * conditions. Note that software would be capable of causing * 1692 1734 * inadvertent overflow or underflow of these counters. * 1693 - * * 1735 + * * 1694 1736 ************************************************************************/ 1695 1737 1696 1738 typedef union ii_ipca_u { 1697 - uint64_t ii_ipca_regval; 1698 - struct { 1699 - uint64_t i_wid : 4; 1700 - uint64_t i_adjust : 1; 1701 - uint64_t i_rsvd_1 : 3; 1702 - uint64_t i_field : 2; 1703 - uint64_t i_rsvd : 54; 1739 + uint64_t ii_ipca_regval; 1740 + struct { 1741 + uint64_t i_wid:4; 1742 + uint64_t i_adjust:1; 1743 + uint64_t i_rsvd_1:3; 1744 + uint64_t i_field:2; 1745 + uint64_t i_rsvd:54; 1704 1746 } ii_ipca_fld_s; 1705 1747 } ii_ipca_u_t; 1706 1748 1707 - 1708 1749 /************************************************************************ 1709 - * * 1750 + * * 1710 1751 * There are 8 instances of this register. This register contains * 1711 1752 * the information that the II has to remember once it has launched a * 1712 1753 * PIO Read operation. The contents are used to form the correct * 1713 1754 * Router Network packet and direct the Crosstalk reply to the * 1714 1755 * appropriate processor. 
* 1715 - * * 1756 + * * 1716 1757 ************************************************************************/ 1717 1758 1718 - 1719 1759 typedef union ii_iprte0a_u { 1720 - uint64_t ii_iprte0a_regval; 1721 - struct { 1722 - uint64_t i_rsvd_1 : 54; 1723 - uint64_t i_widget : 4; 1724 - uint64_t i_to_cnt : 5; 1725 - uint64_t i_vld : 1; 1760 + uint64_t ii_iprte0a_regval; 1761 + struct { 1762 + uint64_t i_rsvd_1:54; 1763 + uint64_t i_widget:4; 1764 + uint64_t i_to_cnt:5; 1765 + uint64_t i_vld:1; 1726 1766 } ii_iprte0a_fld_s; 1727 1767 } ii_iprte0a_u_t; 1728 1768 1729 - 1730 1769 /************************************************************************ 1731 - * * 1770 + * * 1732 1771 * There are 8 instances of this register. This register contains * 1733 1772 * the information that the II has to remember once it has launched a * 1734 1773 * PIO Read operation. The contents are used to form the correct * 1735 1774 * Router Network packet and direct the Crosstalk reply to the * 1736 1775 * appropriate processor. * 1737 - * * 1776 + * * 1738 1777 ************************************************************************/ 1739 1778 1740 1779 typedef union ii_iprte1a_u { 1741 - uint64_t ii_iprte1a_regval; 1742 - struct { 1743 - uint64_t i_rsvd_1 : 54; 1744 - uint64_t i_widget : 4; 1745 - uint64_t i_to_cnt : 5; 1746 - uint64_t i_vld : 1; 1780 + uint64_t ii_iprte1a_regval; 1781 + struct { 1782 + uint64_t i_rsvd_1:54; 1783 + uint64_t i_widget:4; 1784 + uint64_t i_to_cnt:5; 1785 + uint64_t i_vld:1; 1747 1786 } ii_iprte1a_fld_s; 1748 1787 } ii_iprte1a_u_t; 1749 1788 1750 - 1751 1789 /************************************************************************ 1752 - * * 1790 + * * 1753 1791 * There are 8 instances of this register. This register contains * 1754 1792 * the information that the II has to remember once it has launched a * 1755 1793 * PIO Read operation. 
The contents are used to form the correct * 1756 1794 * Router Network packet and direct the Crosstalk reply to the * 1757 1795 * appropriate processor. * 1758 - * * 1796 + * * 1759 1797 ************************************************************************/ 1760 1798 1761 1799 typedef union ii_iprte2a_u { 1762 - uint64_t ii_iprte2a_regval; 1763 - struct { 1764 - uint64_t i_rsvd_1 : 54; 1765 - uint64_t i_widget : 4; 1766 - uint64_t i_to_cnt : 5; 1767 - uint64_t i_vld : 1; 1800 + uint64_t ii_iprte2a_regval; 1801 + struct { 1802 + uint64_t i_rsvd_1:54; 1803 + uint64_t i_widget:4; 1804 + uint64_t i_to_cnt:5; 1805 + uint64_t i_vld:1; 1768 1806 } ii_iprte2a_fld_s; 1769 1807 } ii_iprte2a_u_t; 1770 1808 1771 - 1772 1809 /************************************************************************ 1773 - * * 1810 + * * 1774 1811 * There are 8 instances of this register. This register contains * 1775 1812 * the information that the II has to remember once it has launched a * 1776 1813 * PIO Read operation. The contents are used to form the correct * 1777 1814 * Router Network packet and direct the Crosstalk reply to the * 1778 1815 * appropriate processor. * 1779 - * * 1816 + * * 1780 1817 ************************************************************************/ 1781 1818 1782 1819 typedef union ii_iprte3a_u { 1783 - uint64_t ii_iprte3a_regval; 1784 - struct { 1785 - uint64_t i_rsvd_1 : 54; 1786 - uint64_t i_widget : 4; 1787 - uint64_t i_to_cnt : 5; 1788 - uint64_t i_vld : 1; 1820 + uint64_t ii_iprte3a_regval; 1821 + struct { 1822 + uint64_t i_rsvd_1:54; 1823 + uint64_t i_widget:4; 1824 + uint64_t i_to_cnt:5; 1825 + uint64_t i_vld:1; 1789 1826 } ii_iprte3a_fld_s; 1790 1827 } ii_iprte3a_u_t; 1791 1828 1792 - 1793 1829 /************************************************************************ 1794 - * * 1830 + * * 1795 1831 * There are 8 instances of this register. 
This register contains * 1796 1832 * the information that the II has to remember once it has launched a * 1797 1833 * PIO Read operation. The contents are used to form the correct * 1798 1834 * Router Network packet and direct the Crosstalk reply to the * 1799 1835 * appropriate processor. * 1800 - * * 1836 + * * 1801 1837 ************************************************************************/ 1802 1838 1803 1839 typedef union ii_iprte4a_u { 1804 - uint64_t ii_iprte4a_regval; 1805 - struct { 1806 - uint64_t i_rsvd_1 : 54; 1807 - uint64_t i_widget : 4; 1808 - uint64_t i_to_cnt : 5; 1809 - uint64_t i_vld : 1; 1840 + uint64_t ii_iprte4a_regval; 1841 + struct { 1842 + uint64_t i_rsvd_1:54; 1843 + uint64_t i_widget:4; 1844 + uint64_t i_to_cnt:5; 1845 + uint64_t i_vld:1; 1810 1846 } ii_iprte4a_fld_s; 1811 1847 } ii_iprte4a_u_t; 1812 1848 1813 - 1814 1849 /************************************************************************ 1815 - * * 1850 + * * 1816 1851 * There are 8 instances of this register. This register contains * 1817 1852 * the information that the II has to remember once it has launched a * 1818 1853 * PIO Read operation. The contents are used to form the correct * 1819 1854 * Router Network packet and direct the Crosstalk reply to the * 1820 1855 * appropriate processor. 
* 1821 - * * 1856 + * * 1822 1857 ************************************************************************/ 1823 1858 1824 1859 typedef union ii_iprte5a_u { 1825 - uint64_t ii_iprte5a_regval; 1826 - struct { 1827 - uint64_t i_rsvd_1 : 54; 1828 - uint64_t i_widget : 4; 1829 - uint64_t i_to_cnt : 5; 1830 - uint64_t i_vld : 1; 1860 + uint64_t ii_iprte5a_regval; 1861 + struct { 1862 + uint64_t i_rsvd_1:54; 1863 + uint64_t i_widget:4; 1864 + uint64_t i_to_cnt:5; 1865 + uint64_t i_vld:1; 1831 1866 } ii_iprte5a_fld_s; 1832 1867 } ii_iprte5a_u_t; 1833 1868 1834 - 1835 1869 /************************************************************************ 1836 - * * 1870 + * * 1837 1871 * There are 8 instances of this register. This register contains * 1838 1872 * the information that the II has to remember once it has launched a * 1839 1873 * PIO Read operation. The contents are used to form the correct * 1840 1874 * Router Network packet and direct the Crosstalk reply to the * 1841 1875 * appropriate processor. * 1842 - * * 1876 + * * 1843 1877 ************************************************************************/ 1844 1878 1845 1879 typedef union ii_iprte6a_u { 1846 - uint64_t ii_iprte6a_regval; 1847 - struct { 1848 - uint64_t i_rsvd_1 : 54; 1849 - uint64_t i_widget : 4; 1850 - uint64_t i_to_cnt : 5; 1851 - uint64_t i_vld : 1; 1880 + uint64_t ii_iprte6a_regval; 1881 + struct { 1882 + uint64_t i_rsvd_1:54; 1883 + uint64_t i_widget:4; 1884 + uint64_t i_to_cnt:5; 1885 + uint64_t i_vld:1; 1852 1886 } ii_iprte6a_fld_s; 1853 1887 } ii_iprte6a_u_t; 1854 1888 1855 - 1856 1889 /************************************************************************ 1857 - * * 1890 + * * 1858 1891 * There are 8 instances of this register. This register contains * 1859 1892 * the information that the II has to remember once it has launched a * 1860 1893 * PIO Read operation. 
The contents are used to form the correct * 1861 1894 * Router Network packet and direct the Crosstalk reply to the * 1862 1895 * appropriate processor. * 1863 - * * 1896 + * * 1864 1897 ************************************************************************/ 1865 1898 1866 1899 typedef union ii_iprte7a_u { 1867 - uint64_t ii_iprte7a_regval; 1868 - struct { 1869 - uint64_t i_rsvd_1 : 54; 1870 - uint64_t i_widget : 4; 1871 - uint64_t i_to_cnt : 5; 1872 - uint64_t i_vld : 1; 1873 - } ii_iprtea7_fld_s; 1900 + uint64_t ii_iprte7a_regval; 1901 + struct { 1902 + uint64_t i_rsvd_1:54; 1903 + uint64_t i_widget:4; 1904 + uint64_t i_to_cnt:5; 1905 + uint64_t i_vld:1; 1906 + } ii_iprtea7_fld_s; 1874 1907 } ii_iprte7a_u_t; 1875 1908 1876 - 1877 - 1878 1909 /************************************************************************ 1879 - * * 1910 + * * 1880 1911 * There are 8 instances of this register. This register contains * 1881 1912 * the information that the II has to remember once it has launched a * 1882 1913 * PIO Read operation. The contents are used to form the correct * 1883 1914 * Router Network packet and direct the Crosstalk reply to the * 1884 1915 * appropriate processor. * 1885 - * * 1916 + * * 1886 1917 ************************************************************************/ 1887 1918 1888 - 1889 1919 typedef union ii_iprte0b_u { 1890 - uint64_t ii_iprte0b_regval; 1891 - struct { 1892 - uint64_t i_rsvd_1 : 3; 1893 - uint64_t i_address : 47; 1894 - uint64_t i_init : 3; 1895 - uint64_t i_source : 11; 1920 + uint64_t ii_iprte0b_regval; 1921 + struct { 1922 + uint64_t i_rsvd_1:3; 1923 + uint64_t i_address:47; 1924 + uint64_t i_init:3; 1925 + uint64_t i_source:11; 1896 1926 } ii_iprte0b_fld_s; 1897 1927 } ii_iprte0b_u_t; 1898 1928 1899 - 1900 1929 /************************************************************************ 1901 - * * 1930 + * * 1902 1931 * There are 8 instances of this register. 
This register contains * 1903 1932 * the information that the II has to remember once it has launched a * 1904 1933 * PIO Read operation. The contents are used to form the correct * 1905 1934 * Router Network packet and direct the Crosstalk reply to the * 1906 1935 * appropriate processor. * 1907 - * * 1936 + * * 1908 1937 ************************************************************************/ 1909 1938 1910 1939 typedef union ii_iprte1b_u { 1911 - uint64_t ii_iprte1b_regval; 1912 - struct { 1913 - uint64_t i_rsvd_1 : 3; 1914 - uint64_t i_address : 47; 1915 - uint64_t i_init : 3; 1916 - uint64_t i_source : 11; 1940 + uint64_t ii_iprte1b_regval; 1941 + struct { 1942 + uint64_t i_rsvd_1:3; 1943 + uint64_t i_address:47; 1944 + uint64_t i_init:3; 1945 + uint64_t i_source:11; 1917 1946 } ii_iprte1b_fld_s; 1918 1947 } ii_iprte1b_u_t; 1919 1948 1920 - 1921 1949 /************************************************************************ 1922 - * * 1950 + * * 1923 1951 * There are 8 instances of this register. This register contains * 1924 1952 * the information that the II has to remember once it has launched a * 1925 1953 * PIO Read operation. The contents are used to form the correct * 1926 1954 * Router Network packet and direct the Crosstalk reply to the * 1927 1955 * appropriate processor. 
* 1928 - * * 1956 + * * 1929 1957 ************************************************************************/ 1930 1958 1931 1959 typedef union ii_iprte2b_u { 1932 - uint64_t ii_iprte2b_regval; 1933 - struct { 1934 - uint64_t i_rsvd_1 : 3; 1935 - uint64_t i_address : 47; 1936 - uint64_t i_init : 3; 1937 - uint64_t i_source : 11; 1960 + uint64_t ii_iprte2b_regval; 1961 + struct { 1962 + uint64_t i_rsvd_1:3; 1963 + uint64_t i_address:47; 1964 + uint64_t i_init:3; 1965 + uint64_t i_source:11; 1938 1966 } ii_iprte2b_fld_s; 1939 1967 } ii_iprte2b_u_t; 1940 1968 1941 - 1942 1969 /************************************************************************ 1943 - * * 1970 + * * 1944 1971 * There are 8 instances of this register. This register contains * 1945 1972 * the information that the II has to remember once it has launched a * 1946 1973 * PIO Read operation. The contents are used to form the correct * 1947 1974 * Router Network packet and direct the Crosstalk reply to the * 1948 1975 * appropriate processor. * 1949 - * * 1976 + * * 1950 1977 ************************************************************************/ 1951 1978 1952 1979 typedef union ii_iprte3b_u { 1953 - uint64_t ii_iprte3b_regval; 1954 - struct { 1955 - uint64_t i_rsvd_1 : 3; 1956 - uint64_t i_address : 47; 1957 - uint64_t i_init : 3; 1958 - uint64_t i_source : 11; 1980 + uint64_t ii_iprte3b_regval; 1981 + struct { 1982 + uint64_t i_rsvd_1:3; 1983 + uint64_t i_address:47; 1984 + uint64_t i_init:3; 1985 + uint64_t i_source:11; 1959 1986 } ii_iprte3b_fld_s; 1960 1987 } ii_iprte3b_u_t; 1961 1988 1962 - 1963 1989 /************************************************************************ 1964 - * * 1990 + * * 1965 1991 * There are 8 instances of this register. This register contains * 1966 1992 * the information that the II has to remember once it has launched a * 1967 1993 * PIO Read operation. 
The contents are used to form the correct * 1968 1994 * Router Network packet and direct the Crosstalk reply to the * 1969 1995 * appropriate processor. * 1970 - * * 1996 + * * 1971 1997 ************************************************************************/ 1972 1998 1973 1999 typedef union ii_iprte4b_u { 1974 - uint64_t ii_iprte4b_regval; 1975 - struct { 1976 - uint64_t i_rsvd_1 : 3; 1977 - uint64_t i_address : 47; 1978 - uint64_t i_init : 3; 1979 - uint64_t i_source : 11; 2000 + uint64_t ii_iprte4b_regval; 2001 + struct { 2002 + uint64_t i_rsvd_1:3; 2003 + uint64_t i_address:47; 2004 + uint64_t i_init:3; 2005 + uint64_t i_source:11; 1980 2006 } ii_iprte4b_fld_s; 1981 2007 } ii_iprte4b_u_t; 1982 2008 1983 - 1984 2009 /************************************************************************ 1985 - * * 2010 + * * 1986 2011 * There are 8 instances of this register. This register contains * 1987 2012 * the information that the II has to remember once it has launched a * 1988 2013 * PIO Read operation. The contents are used to form the correct * 1989 2014 * Router Network packet and direct the Crosstalk reply to the * 1990 2015 * appropriate processor. * 1991 - * * 2016 + * * 1992 2017 ************************************************************************/ 1993 2018 1994 2019 typedef union ii_iprte5b_u { 1995 - uint64_t ii_iprte5b_regval; 1996 - struct { 1997 - uint64_t i_rsvd_1 : 3; 1998 - uint64_t i_address : 47; 1999 - uint64_t i_init : 3; 2000 - uint64_t i_source : 11; 2020 + uint64_t ii_iprte5b_regval; 2021 + struct { 2022 + uint64_t i_rsvd_1:3; 2023 + uint64_t i_address:47; 2024 + uint64_t i_init:3; 2025 + uint64_t i_source:11; 2001 2026 } ii_iprte5b_fld_s; 2002 2027 } ii_iprte5b_u_t; 2003 2028 2004 - 2005 2029 /************************************************************************ 2006 - * * 2030 + * * 2007 2031 * There are 8 instances of this register. 
This register contains * 2008 2032 * the information that the II has to remember once it has launched a * 2009 2033 * PIO Read operation. The contents are used to form the correct * 2010 2034 * Router Network packet and direct the Crosstalk reply to the * 2011 2035 * appropriate processor. * 2012 - * * 2036 + * * 2013 2037 ************************************************************************/ 2014 2038 2015 2039 typedef union ii_iprte6b_u { 2016 - uint64_t ii_iprte6b_regval; 2017 - struct { 2018 - uint64_t i_rsvd_1 : 3; 2019 - uint64_t i_address : 47; 2020 - uint64_t i_init : 3; 2021 - uint64_t i_source : 11; 2040 + uint64_t ii_iprte6b_regval; 2041 + struct { 2042 + uint64_t i_rsvd_1:3; 2043 + uint64_t i_address:47; 2044 + uint64_t i_init:3; 2045 + uint64_t i_source:11; 2022 2046 2023 2047 } ii_iprte6b_fld_s; 2024 2048 } ii_iprte6b_u_t; 2025 2049 2026 - 2027 2050 /************************************************************************ 2028 - * * 2051 + * * 2029 2052 * There are 8 instances of this register. This register contains * 2030 2053 * the information that the II has to remember once it has launched a * 2031 2054 * PIO Read operation. The contents are used to form the correct * 2032 2055 * Router Network packet and direct the Crosstalk reply to the * 2033 2056 * appropriate processor. 
* 2034 - * * 2057 + * * 2035 2058 ************************************************************************/ 2036 2059 2037 2060 typedef union ii_iprte7b_u { 2038 - uint64_t ii_iprte7b_regval; 2039 - struct { 2040 - uint64_t i_rsvd_1 : 3; 2041 - uint64_t i_address : 47; 2042 - uint64_t i_init : 3; 2043 - uint64_t i_source : 11; 2044 - } ii_iprte7b_fld_s; 2061 + uint64_t ii_iprte7b_regval; 2062 + struct { 2063 + uint64_t i_rsvd_1:3; 2064 + uint64_t i_address:47; 2065 + uint64_t i_init:3; 2066 + uint64_t i_source:11; 2067 + } ii_iprte7b_fld_s; 2045 2068 } ii_iprte7b_u_t; 2046 2069 2047 - 2048 2070 /************************************************************************ 2049 - * * 2071 + * * 2050 2072 * Description: SHub II contains a feature which did not exist in * 2051 2073 * the Hub which automatically cleans up after a Read Response * 2052 2074 * timeout, including deallocation of the IPRTE and recovery of IBuf * ··· 2034 2108 * Note that this register does not affect the contents of the IPRTE * 2035 2109 * registers. The Valid bits in those registers have to be * 2036 2110 * specifically turned off by software. * 2037 - * * 2111 + * * 2038 2112 ************************************************************************/ 2039 2113 2040 2114 typedef union ii_ipdr_u { 2041 - uint64_t ii_ipdr_regval; 2042 - struct { 2043 - uint64_t i_te : 3; 2044 - uint64_t i_rsvd_1 : 1; 2045 - uint64_t i_pnd : 1; 2046 - uint64_t i_init_rpcnt : 1; 2047 - uint64_t i_rsvd : 58; 2115 + uint64_t ii_ipdr_regval; 2116 + struct { 2117 + uint64_t i_te:3; 2118 + uint64_t i_rsvd_1:1; 2119 + uint64_t i_pnd:1; 2120 + uint64_t i_init_rpcnt:1; 2121 + uint64_t i_rsvd:58; 2048 2122 } ii_ipdr_fld_s; 2049 2123 } ii_ipdr_u_t; 2050 2124 2051 - 2052 2125 /************************************************************************ 2053 - * * 2126 + * * 2054 2127 * A write to this register causes a CRB entry to be returned to the * 2055 2128 * queue of free CRBs. 
The entry should have previously been cleared * 2056 2129 * (mark bit) via backdoor access to the pertinent CRB entry. This * ··· 2062 2137 * software clears the mark bit, and finally 4) software writes to * 2063 2138 * the ICDR register to return the CRB entry to the list of free CRB * 2064 2139 * entries. * 2065 - * * 2140 + * * 2066 2141 ************************************************************************/ 2067 2142 2068 2143 typedef union ii_icdr_u { 2069 - uint64_t ii_icdr_regval; 2070 - struct { 2071 - uint64_t i_crb_num : 4; 2072 - uint64_t i_pnd : 1; 2073 - uint64_t i_rsvd : 59; 2144 + uint64_t ii_icdr_regval; 2145 + struct { 2146 + uint64_t i_crb_num:4; 2147 + uint64_t i_pnd:1; 2148 + uint64_t i_rsvd:59; 2074 2149 } ii_icdr_fld_s; 2075 2150 } ii_icdr_u_t; 2076 2151 2077 - 2078 2152 /************************************************************************ 2079 - * * 2153 + * * 2080 2154 * This register provides debug access to two FIFOs inside of II. * 2081 2155 * Both IOQ_MAX* fields of this register contain the instantaneous * 2082 2156 * depth (in units of the number of available entries) of the * ··· 2088 2164 * this register is written. If there are any active entries in any * 2089 2165 * of these FIFOs when this register is written, the results are * 2090 2166 * undefined. 
* 2091 - * * 2167 + * * 2092 2168 ************************************************************************/ 2093 2169 2094 2170 typedef union ii_ifdr_u { 2095 - uint64_t ii_ifdr_regval; 2096 - struct { 2097 - uint64_t i_ioq_max_rq : 7; 2098 - uint64_t i_set_ioq_rq : 1; 2099 - uint64_t i_ioq_max_rp : 7; 2100 - uint64_t i_set_ioq_rp : 1; 2101 - uint64_t i_rsvd : 48; 2171 + uint64_t ii_ifdr_regval; 2172 + struct { 2173 + uint64_t i_ioq_max_rq:7; 2174 + uint64_t i_set_ioq_rq:1; 2175 + uint64_t i_ioq_max_rp:7; 2176 + uint64_t i_set_ioq_rp:1; 2177 + uint64_t i_rsvd:48; 2102 2178 } ii_ifdr_fld_s; 2103 2179 } ii_ifdr_u_t; 2104 2180 2105 - 2106 2181 /************************************************************************ 2107 - * * 2182 + * * 2108 2183 * This register allows the II to become sluggish in removing * 2109 2184 * messages from its inbound queue (IIQ). This will cause messages to * 2110 2185 * back up in either virtual channel. Disabling the "molasses" mode * 2111 2186 * subsequently allows the II to be tested under stress. In the * 2112 2187 * sluggish ("Molasses") mode, the localized effects of congestion * 2113 2188 * can be observed. * 2114 - * * 2189 + * * 2115 2190 ************************************************************************/ 2116 2191 2117 2192 typedef union ii_iiap_u { 2118 - uint64_t ii_iiap_regval; 2119 - struct { 2120 - uint64_t i_rq_mls : 6; 2121 - uint64_t i_rsvd_1 : 2; 2122 - uint64_t i_rp_mls : 6; 2123 - uint64_t i_rsvd : 50; 2124 - } ii_iiap_fld_s; 2193 + uint64_t ii_iiap_regval; 2194 + struct { 2195 + uint64_t i_rq_mls:6; 2196 + uint64_t i_rsvd_1:2; 2197 + uint64_t i_rp_mls:6; 2198 + uint64_t i_rsvd:50; 2199 + } ii_iiap_fld_s; 2125 2200 } ii_iiap_u_t; 2126 2201 2127 - 2128 2202 /************************************************************************ 2129 - * * 2203 + * * 2130 2204 * This register allows several parameters of CRB operation to be * 2131 2205 * set. 
Note that writing to this register can have catastrophic side * 2132 2206 * effects, if the CRB is not quiescent, i.e. if the CRB is * 2133 2207 * processing protocol messages when the write occurs. * 2134 - * * 2208 + * * 2135 2209 ************************************************************************/ 2136 2210 2137 2211 typedef union ii_icmr_u { 2138 - uint64_t ii_icmr_regval; 2139 - struct { 2140 - uint64_t i_sp_msg : 1; 2141 - uint64_t i_rd_hdr : 1; 2142 - uint64_t i_rsvd_4 : 2; 2143 - uint64_t i_c_cnt : 4; 2144 - uint64_t i_rsvd_3 : 4; 2145 - uint64_t i_clr_rqpd : 1; 2146 - uint64_t i_clr_rppd : 1; 2147 - uint64_t i_rsvd_2 : 2; 2148 - uint64_t i_fc_cnt : 4; 2149 - uint64_t i_crb_vld : 15; 2150 - uint64_t i_crb_mark : 15; 2151 - uint64_t i_rsvd_1 : 2; 2152 - uint64_t i_precise : 1; 2153 - uint64_t i_rsvd : 11; 2212 + uint64_t ii_icmr_regval; 2213 + struct { 2214 + uint64_t i_sp_msg:1; 2215 + uint64_t i_rd_hdr:1; 2216 + uint64_t i_rsvd_4:2; 2217 + uint64_t i_c_cnt:4; 2218 + uint64_t i_rsvd_3:4; 2219 + uint64_t i_clr_rqpd:1; 2220 + uint64_t i_clr_rppd:1; 2221 + uint64_t i_rsvd_2:2; 2222 + uint64_t i_fc_cnt:4; 2223 + uint64_t i_crb_vld:15; 2224 + uint64_t i_crb_mark:15; 2225 + uint64_t i_rsvd_1:2; 2226 + uint64_t i_precise:1; 2227 + uint64_t i_rsvd:11; 2154 2228 } ii_icmr_fld_s; 2155 2229 } ii_icmr_u_t; 2156 2230 2157 - 2158 2231 /************************************************************************ 2159 - * * 2232 + * * 2160 2233 * This register allows control of the table portion of the CRB * 2161 2234 * logic via software. Control operations from this register have * 2162 2235 * priority over all incoming Crosstalk or BTE requests. 
* 2163 - * * 2236 + * * 2164 2237 ************************************************************************/ 2165 2238 2166 2239 typedef union ii_iccr_u { 2167 - uint64_t ii_iccr_regval; 2168 - struct { 2169 - uint64_t i_crb_num : 4; 2170 - uint64_t i_rsvd_1 : 4; 2171 - uint64_t i_cmd : 8; 2172 - uint64_t i_pending : 1; 2173 - uint64_t i_rsvd : 47; 2240 + uint64_t ii_iccr_regval; 2241 + struct { 2242 + uint64_t i_crb_num:4; 2243 + uint64_t i_rsvd_1:4; 2244 + uint64_t i_cmd:8; 2245 + uint64_t i_pending:1; 2246 + uint64_t i_rsvd:47; 2174 2247 } ii_iccr_fld_s; 2175 2248 } ii_iccr_u_t; 2176 2249 2177 - 2178 2250 /************************************************************************ 2179 - * * 2251 + * * 2180 2252 * This register allows the maximum timeout value to be programmed. * 2181 - * * 2253 + * * 2182 2254 ************************************************************************/ 2183 2255 2184 2256 typedef union ii_icto_u { 2185 - uint64_t ii_icto_regval; 2186 - struct { 2187 - uint64_t i_timeout : 8; 2188 - uint64_t i_rsvd : 56; 2257 + uint64_t ii_icto_regval; 2258 + struct { 2259 + uint64_t i_timeout:8; 2260 + uint64_t i_rsvd:56; 2189 2261 } ii_icto_fld_s; 2190 2262 } ii_icto_u_t; 2191 2263 2192 - 2193 2264 /************************************************************************ 2194 - * * 2265 + * * 2195 2266 * This register allows the timeout prescalar to be programmed. An * 2196 2267 * internal counter is associated with this register. When the * 2197 2268 * internal counter reaches the value of the PRESCALE field, the * 2198 2269 * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] * 2199 2270 * field). The internal counter resets to zero, and then continues * 2200 2271 * counting. 
* 2201 - * * 2272 + * * 2202 2273 ************************************************************************/ 2203 2274 2204 2275 typedef union ii_ictp_u { 2205 - uint64_t ii_ictp_regval; 2206 - struct { 2207 - uint64_t i_prescale : 24; 2208 - uint64_t i_rsvd : 40; 2276 + uint64_t ii_ictp_regval; 2277 + struct { 2278 + uint64_t i_prescale:24; 2279 + uint64_t i_rsvd:40; 2209 2280 } ii_ictp_fld_s; 2210 2281 } ii_ictp_u_t; 2211 2282 2212 - 2213 2283 /************************************************************************ 2214 - * * 2284 + * * 2215 2285 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2216 2286 * used for Crosstalk operations (both cacheline and partial * 2217 2287 * operations) or BTE/IO. Because the CRB entries are very wide, five * ··· 2224 2306 * recovering any potential error state from before the reset). * 2225 2307 * The following four tables summarize the format for the four * 2226 2308 * registers that are used for each ICRB# Entry. * 2227 - * * 2309 + * * 2228 2310 ************************************************************************/ 2229 2311 2230 2312 typedef union ii_icrb0_a_u { 2231 - uint64_t ii_icrb0_a_regval; 2232 - struct { 2233 - uint64_t ia_iow : 1; 2234 - uint64_t ia_vld : 1; 2235 - uint64_t ia_addr : 47; 2236 - uint64_t ia_tnum : 5; 2237 - uint64_t ia_sidn : 4; 2238 - uint64_t ia_rsvd : 6; 2313 + uint64_t ii_icrb0_a_regval; 2314 + struct { 2315 + uint64_t ia_iow:1; 2316 + uint64_t ia_vld:1; 2317 + uint64_t ia_addr:47; 2318 + uint64_t ia_tnum:5; 2319 + uint64_t ia_sidn:4; 2320 + uint64_t ia_rsvd:6; 2239 2321 } ii_icrb0_a_fld_s; 2240 2322 } ii_icrb0_a_u_t; 2241 2323 2242 - 2243 2324 /************************************************************************ 2244 - * * 2325 + * * 2245 2326 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2246 2327 * used for Crosstalk operations (both cacheline and partial * 2247 2328 * operations) or BTE/IO. 
Because the CRB entries are very wide, five * 2248 2329 * registers (_A to _E) are required to read and write each entry. * 2249 - * * 2330 + * * 2250 2331 ************************************************************************/ 2251 2332 2252 2333 typedef union ii_icrb0_b_u { 2253 - uint64_t ii_icrb0_b_regval; 2254 - struct { 2255 - uint64_t ib_xt_err : 1; 2256 - uint64_t ib_mark : 1; 2257 - uint64_t ib_ln_uce : 1; 2258 - uint64_t ib_errcode : 3; 2259 - uint64_t ib_error : 1; 2260 - uint64_t ib_stall__bte_1 : 1; 2261 - uint64_t ib_stall__bte_0 : 1; 2262 - uint64_t ib_stall__intr : 1; 2263 - uint64_t ib_stall_ib : 1; 2264 - uint64_t ib_intvn : 1; 2265 - uint64_t ib_wb : 1; 2266 - uint64_t ib_hold : 1; 2267 - uint64_t ib_ack : 1; 2268 - uint64_t ib_resp : 1; 2269 - uint64_t ib_ack_cnt : 11; 2270 - uint64_t ib_rsvd : 7; 2271 - uint64_t ib_exc : 5; 2272 - uint64_t ib_init : 3; 2273 - uint64_t ib_imsg : 8; 2274 - uint64_t ib_imsgtype : 2; 2275 - uint64_t ib_use_old : 1; 2276 - uint64_t ib_rsvd_1 : 11; 2334 + uint64_t ii_icrb0_b_regval; 2335 + struct { 2336 + uint64_t ib_xt_err:1; 2337 + uint64_t ib_mark:1; 2338 + uint64_t ib_ln_uce:1; 2339 + uint64_t ib_errcode:3; 2340 + uint64_t ib_error:1; 2341 + uint64_t ib_stall__bte_1:1; 2342 + uint64_t ib_stall__bte_0:1; 2343 + uint64_t ib_stall__intr:1; 2344 + uint64_t ib_stall_ib:1; 2345 + uint64_t ib_intvn:1; 2346 + uint64_t ib_wb:1; 2347 + uint64_t ib_hold:1; 2348 + uint64_t ib_ack:1; 2349 + uint64_t ib_resp:1; 2350 + uint64_t ib_ack_cnt:11; 2351 + uint64_t ib_rsvd:7; 2352 + uint64_t ib_exc:5; 2353 + uint64_t ib_init:3; 2354 + uint64_t ib_imsg:8; 2355 + uint64_t ib_imsgtype:2; 2356 + uint64_t ib_use_old:1; 2357 + uint64_t ib_rsvd_1:11; 2277 2358 } ii_icrb0_b_fld_s; 2278 2359 } ii_icrb0_b_u_t; 2279 2360 2280 - 2281 2361 /************************************************************************ 2282 - * * 2362 + * * 2283 2363 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2284 2364 * used for Crosstalk 
operations (both cacheline and partial * 2285 2365 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2286 2366 * registers (_A to _E) are required to read and write each entry. * 2287 - * * 2367 + * * 2288 2368 ************************************************************************/ 2289 2369 2290 2370 typedef union ii_icrb0_c_u { 2291 - uint64_t ii_icrb0_c_regval; 2292 - struct { 2293 - uint64_t ic_source : 15; 2294 - uint64_t ic_size : 2; 2295 - uint64_t ic_ct : 1; 2296 - uint64_t ic_bte_num : 1; 2297 - uint64_t ic_gbr : 1; 2298 - uint64_t ic_resprqd : 1; 2299 - uint64_t ic_bo : 1; 2300 - uint64_t ic_suppl : 15; 2301 - uint64_t ic_rsvd : 27; 2371 + uint64_t ii_icrb0_c_regval; 2372 + struct { 2373 + uint64_t ic_source:15; 2374 + uint64_t ic_size:2; 2375 + uint64_t ic_ct:1; 2376 + uint64_t ic_bte_num:1; 2377 + uint64_t ic_gbr:1; 2378 + uint64_t ic_resprqd:1; 2379 + uint64_t ic_bo:1; 2380 + uint64_t ic_suppl:15; 2381 + uint64_t ic_rsvd:27; 2302 2382 } ii_icrb0_c_fld_s; 2303 2383 } ii_icrb0_c_u_t; 2304 2384 2305 - 2306 2385 /************************************************************************ 2307 - * * 2386 + * * 2308 2387 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2309 2388 * used for Crosstalk operations (both cacheline and partial * 2310 2389 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2311 2390 * registers (_A to _E) are required to read and write each entry. 
* 2312 - * * 2391 + * * 2313 2392 ************************************************************************/ 2314 2393 2315 2394 typedef union ii_icrb0_d_u { 2316 - uint64_t ii_icrb0_d_regval; 2317 - struct { 2318 - uint64_t id_pa_be : 43; 2319 - uint64_t id_bte_op : 1; 2320 - uint64_t id_pr_psc : 4; 2321 - uint64_t id_pr_cnt : 4; 2322 - uint64_t id_sleep : 1; 2323 - uint64_t id_rsvd : 11; 2395 + uint64_t ii_icrb0_d_regval; 2396 + struct { 2397 + uint64_t id_pa_be:43; 2398 + uint64_t id_bte_op:1; 2399 + uint64_t id_pr_psc:4; 2400 + uint64_t id_pr_cnt:4; 2401 + uint64_t id_sleep:1; 2402 + uint64_t id_rsvd:11; 2324 2403 } ii_icrb0_d_fld_s; 2325 2404 } ii_icrb0_d_u_t; 2326 2405 2327 - 2328 2406 /************************************************************************ 2329 - * * 2407 + * * 2330 2408 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2331 2409 * used for Crosstalk operations (both cacheline and partial * 2332 2410 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2333 2411 * registers (_A to _E) are required to read and write each entry. 
* 2334 - * * 2412 + * * 2335 2413 ************************************************************************/ 2336 2414 2337 2415 typedef union ii_icrb0_e_u { 2338 - uint64_t ii_icrb0_e_regval; 2339 - struct { 2340 - uint64_t ie_timeout : 8; 2341 - uint64_t ie_context : 15; 2342 - uint64_t ie_rsvd : 1; 2343 - uint64_t ie_tvld : 1; 2344 - uint64_t ie_cvld : 1; 2345 - uint64_t ie_rsvd_0 : 38; 2416 + uint64_t ii_icrb0_e_regval; 2417 + struct { 2418 + uint64_t ie_timeout:8; 2419 + uint64_t ie_context:15; 2420 + uint64_t ie_rsvd:1; 2421 + uint64_t ie_tvld:1; 2422 + uint64_t ie_cvld:1; 2423 + uint64_t ie_rsvd_0:38; 2346 2424 } ii_icrb0_e_fld_s; 2347 2425 } ii_icrb0_e_u_t; 2348 2426 2349 - 2350 2427 /************************************************************************ 2351 - * * 2428 + * * 2352 2429 * This register contains the lower 64 bits of the header of the * 2353 2430 * spurious message captured by II. Valid when the SP_MSG bit in ICMR * 2354 2431 * register is set. * 2355 - * * 2432 + * * 2356 2433 ************************************************************************/ 2357 2434 2358 2435 typedef union ii_icsml_u { 2359 - uint64_t ii_icsml_regval; 2360 - struct { 2361 - uint64_t i_tt_addr : 47; 2362 - uint64_t i_newsuppl_ex : 14; 2363 - uint64_t i_reserved : 2; 2364 - uint64_t i_overflow : 1; 2436 + uint64_t ii_icsml_regval; 2437 + struct { 2438 + uint64_t i_tt_addr:47; 2439 + uint64_t i_newsuppl_ex:14; 2440 + uint64_t i_reserved:2; 2441 + uint64_t i_overflow:1; 2365 2442 } ii_icsml_fld_s; 2366 2443 } ii_icsml_u_t; 2367 2444 2368 - 2369 2445 /************************************************************************ 2370 - * * 2446 + * * 2371 2447 * This register contains the middle 64 bits of the header of the * 2372 2448 * spurious message captured by II. Valid when the SP_MSG bit in ICMR * 2373 2449 * register is set. 
* 2374 - * * 2450 + * * 2375 2451 ************************************************************************/ 2376 2452 2377 2453 typedef union ii_icsmm_u { 2378 - uint64_t ii_icsmm_regval; 2379 - struct { 2380 - uint64_t i_tt_ack_cnt : 11; 2381 - uint64_t i_reserved : 53; 2454 + uint64_t ii_icsmm_regval; 2455 + struct { 2456 + uint64_t i_tt_ack_cnt:11; 2457 + uint64_t i_reserved:53; 2382 2458 } ii_icsmm_fld_s; 2383 2459 } ii_icsmm_u_t; 2384 2460 2385 - 2386 2461 /************************************************************************ 2387 - * * 2462 + * * 2388 2463 * This register contains the microscopic state, all the inputs to * 2389 2464 * the protocol table, captured with the spurious message. Valid when * 2390 2465 * the SP_MSG bit in the ICMR register is set. * 2391 - * * 2466 + * * 2392 2467 ************************************************************************/ 2393 2468 2394 2469 typedef union ii_icsmh_u { 2395 - uint64_t ii_icsmh_regval; 2396 - struct { 2397 - uint64_t i_tt_vld : 1; 2398 - uint64_t i_xerr : 1; 2399 - uint64_t i_ft_cwact_o : 1; 2400 - uint64_t i_ft_wact_o : 1; 2401 - uint64_t i_ft_active_o : 1; 2402 - uint64_t i_sync : 1; 2403 - uint64_t i_mnusg : 1; 2404 - uint64_t i_mnusz : 1; 2405 - uint64_t i_plusz : 1; 2406 - uint64_t i_plusg : 1; 2407 - uint64_t i_tt_exc : 5; 2408 - uint64_t i_tt_wb : 1; 2409 - uint64_t i_tt_hold : 1; 2410 - uint64_t i_tt_ack : 1; 2411 - uint64_t i_tt_resp : 1; 2412 - uint64_t i_tt_intvn : 1; 2413 - uint64_t i_g_stall_bte1 : 1; 2414 - uint64_t i_g_stall_bte0 : 1; 2415 - uint64_t i_g_stall_il : 1; 2416 - uint64_t i_g_stall_ib : 1; 2417 - uint64_t i_tt_imsg : 8; 2418 - uint64_t i_tt_imsgtype : 2; 2419 - uint64_t i_tt_use_old : 1; 2420 - uint64_t i_tt_respreqd : 1; 2421 - uint64_t i_tt_bte_num : 1; 2422 - uint64_t i_cbn : 1; 2423 - uint64_t i_match : 1; 2424 - uint64_t i_rpcnt_lt_34 : 1; 2425 - uint64_t i_rpcnt_ge_34 : 1; 2426 - uint64_t i_rpcnt_lt_18 : 1; 2427 - uint64_t i_rpcnt_ge_18 : 1; 2428 - uint64_t 
i_rpcnt_lt_2 : 1; 2429 - uint64_t i_rpcnt_ge_2 : 1; 2430 - uint64_t i_rqcnt_lt_18 : 1; 2431 - uint64_t i_rqcnt_ge_18 : 1; 2432 - uint64_t i_rqcnt_lt_2 : 1; 2433 - uint64_t i_rqcnt_ge_2 : 1; 2434 - uint64_t i_tt_device : 7; 2435 - uint64_t i_tt_init : 3; 2436 - uint64_t i_reserved : 5; 2470 + uint64_t ii_icsmh_regval; 2471 + struct { 2472 + uint64_t i_tt_vld:1; 2473 + uint64_t i_xerr:1; 2474 + uint64_t i_ft_cwact_o:1; 2475 + uint64_t i_ft_wact_o:1; 2476 + uint64_t i_ft_active_o:1; 2477 + uint64_t i_sync:1; 2478 + uint64_t i_mnusg:1; 2479 + uint64_t i_mnusz:1; 2480 + uint64_t i_plusz:1; 2481 + uint64_t i_plusg:1; 2482 + uint64_t i_tt_exc:5; 2483 + uint64_t i_tt_wb:1; 2484 + uint64_t i_tt_hold:1; 2485 + uint64_t i_tt_ack:1; 2486 + uint64_t i_tt_resp:1; 2487 + uint64_t i_tt_intvn:1; 2488 + uint64_t i_g_stall_bte1:1; 2489 + uint64_t i_g_stall_bte0:1; 2490 + uint64_t i_g_stall_il:1; 2491 + uint64_t i_g_stall_ib:1; 2492 + uint64_t i_tt_imsg:8; 2493 + uint64_t i_tt_imsgtype:2; 2494 + uint64_t i_tt_use_old:1; 2495 + uint64_t i_tt_respreqd:1; 2496 + uint64_t i_tt_bte_num:1; 2497 + uint64_t i_cbn:1; 2498 + uint64_t i_match:1; 2499 + uint64_t i_rpcnt_lt_34:1; 2500 + uint64_t i_rpcnt_ge_34:1; 2501 + uint64_t i_rpcnt_lt_18:1; 2502 + uint64_t i_rpcnt_ge_18:1; 2503 + uint64_t i_rpcnt_lt_2:1; 2504 + uint64_t i_rpcnt_ge_2:1; 2505 + uint64_t i_rqcnt_lt_18:1; 2506 + uint64_t i_rqcnt_ge_18:1; 2507 + uint64_t i_rqcnt_lt_2:1; 2508 + uint64_t i_rqcnt_ge_2:1; 2509 + uint64_t i_tt_device:7; 2510 + uint64_t i_tt_init:3; 2511 + uint64_t i_reserved:5; 2437 2512 } ii_icsmh_fld_s; 2438 2513 } ii_icsmh_u_t; 2439 2514 2440 - 2441 2515 /************************************************************************ 2442 - * * 2516 + * * 2443 2517 * The Shub DEBUG unit provides a 3-bit selection signal to the * 2444 2518 * II core and a 3-bit selection signal to the fsbclk domain in the II * 2445 2519 * wrapper. 
* 2446 - * * 2520 + * * 2447 2521 ************************************************************************/ 2448 2522 2449 2523 typedef union ii_idbss_u { 2450 - uint64_t ii_idbss_regval; 2451 - struct { 2452 - uint64_t i_iioclk_core_submenu : 3; 2453 - uint64_t i_rsvd : 5; 2454 - uint64_t i_fsbclk_wrapper_submenu : 3; 2455 - uint64_t i_rsvd_1 : 5; 2456 - uint64_t i_iioclk_menu : 5; 2457 - uint64_t i_rsvd_2 : 43; 2524 + uint64_t ii_idbss_regval; 2525 + struct { 2526 + uint64_t i_iioclk_core_submenu:3; 2527 + uint64_t i_rsvd:5; 2528 + uint64_t i_fsbclk_wrapper_submenu:3; 2529 + uint64_t i_rsvd_1:5; 2530 + uint64_t i_iioclk_menu:5; 2531 + uint64_t i_rsvd_2:43; 2458 2532 } ii_idbss_fld_s; 2459 2533 } ii_idbss_u_t; 2460 2534 2461 - 2462 2535 /************************************************************************ 2463 - * * 2536 + * * 2464 2537 * Description: This register is used to set up the length for a * 2465 2538 * transfer and then to monitor the progress of that transfer. This * 2466 2539 * register needs to be initialized before a transfer is started. A * ··· 2462 2553 * transfer completes, hardware will clear the Busy bit. The length * 2463 2554 * field will also contain the number of cache lines left to be * 2464 2555 * transferred. 
* 2465 - * * 2556 + * * 2466 2557 ************************************************************************/ 2467 2558 2468 2559 typedef union ii_ibls0_u { 2469 - uint64_t ii_ibls0_regval; 2470 - struct { 2471 - uint64_t i_length : 16; 2472 - uint64_t i_error : 1; 2473 - uint64_t i_rsvd_1 : 3; 2474 - uint64_t i_busy : 1; 2475 - uint64_t i_rsvd : 43; 2560 + uint64_t ii_ibls0_regval; 2561 + struct { 2562 + uint64_t i_length:16; 2563 + uint64_t i_error:1; 2564 + uint64_t i_rsvd_1:3; 2565 + uint64_t i_busy:1; 2566 + uint64_t i_rsvd:43; 2476 2567 } ii_ibls0_fld_s; 2477 2568 } ii_ibls0_u_t; 2478 2569 2479 - 2480 2570 /************************************************************************ 2481 - * * 2571 + * * 2482 2572 * This register should be loaded before a transfer is started. The * 2483 2573 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2484 2574 * address as described in Section 1.3, Figure2 and Figure3. Since * 2485 2575 * the bottom 7 bits of the address are always taken to be zero, BTE * 2486 2576 * transfers are always cacheline-aligned. * 2487 - * * 2577 + * * 2488 2578 ************************************************************************/ 2489 2579 2490 2580 typedef union ii_ibsa0_u { 2491 - uint64_t ii_ibsa0_regval; 2492 - struct { 2493 - uint64_t i_rsvd_1 : 7; 2494 - uint64_t i_addr : 42; 2495 - uint64_t i_rsvd : 15; 2581 + uint64_t ii_ibsa0_regval; 2582 + struct { 2583 + uint64_t i_rsvd_1:7; 2584 + uint64_t i_addr:42; 2585 + uint64_t i_rsvd:15; 2496 2586 } ii_ibsa0_fld_s; 2497 2587 } ii_ibsa0_u_t; 2498 2588 2499 - 2500 2589 /************************************************************************ 2501 - * * 2590 + * * 2502 2591 * This register should be loaded before a transfer is started. The * 2503 2592 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2504 2593 * address as described in Section 1.3, Figure2 and Figure3. 
Since * 2505 2594 * the bottom 7 bits of the address are always taken to be zero, BTE * 2506 2595 * transfers are always cacheline-aligned. * 2507 - * * 2596 + * * 2508 2597 ************************************************************************/ 2509 2598 2510 2599 typedef union ii_ibda0_u { 2511 - uint64_t ii_ibda0_regval; 2512 - struct { 2513 - uint64_t i_rsvd_1 : 7; 2514 - uint64_t i_addr : 42; 2515 - uint64_t i_rsvd : 15; 2600 + uint64_t ii_ibda0_regval; 2601 + struct { 2602 + uint64_t i_rsvd_1:7; 2603 + uint64_t i_addr:42; 2604 + uint64_t i_rsvd:15; 2516 2605 } ii_ibda0_fld_s; 2517 2606 } ii_ibda0_u_t; 2518 2607 2519 - 2520 2608 /************************************************************************ 2521 - * * 2609 + * * 2522 2610 * Writing to this register sets up the attributes of the transfer * 2523 2611 * and initiates the transfer operation. Reading this register has * 2524 2612 * the side effect of terminating any transfer in progress. Note: * ··· 2523 2617 * other BTE. If a BTE stream has to be stopped (due to error * 2524 2618 * handling for example), both BTE streams should be stopped and * 2525 2619 * their transfers discarded. 
* 2526 - * * 2620 + * * 2527 2621 ************************************************************************/ 2528 2622 2529 2623 typedef union ii_ibct0_u { 2530 - uint64_t ii_ibct0_regval; 2531 - struct { 2532 - uint64_t i_zerofill : 1; 2533 - uint64_t i_rsvd_2 : 3; 2534 - uint64_t i_notify : 1; 2535 - uint64_t i_rsvd_1 : 3; 2536 - uint64_t i_poison : 1; 2537 - uint64_t i_rsvd : 55; 2624 + uint64_t ii_ibct0_regval; 2625 + struct { 2626 + uint64_t i_zerofill:1; 2627 + uint64_t i_rsvd_2:3; 2628 + uint64_t i_notify:1; 2629 + uint64_t i_rsvd_1:3; 2630 + uint64_t i_poison:1; 2631 + uint64_t i_rsvd:55; 2538 2632 } ii_ibct0_fld_s; 2539 2633 } ii_ibct0_u_t; 2540 2634 2541 - 2542 2635 /************************************************************************ 2543 - * * 2636 + * * 2544 2637 * This register contains the address to which the WINV is sent. * 2545 2638 * This address has to be cache line aligned. * 2546 - * * 2639 + * * 2547 2640 ************************************************************************/ 2548 2641 2549 2642 typedef union ii_ibna0_u { 2550 - uint64_t ii_ibna0_regval; 2551 - struct { 2552 - uint64_t i_rsvd_1 : 7; 2553 - uint64_t i_addr : 42; 2554 - uint64_t i_rsvd : 15; 2643 + uint64_t ii_ibna0_regval; 2644 + struct { 2645 + uint64_t i_rsvd_1:7; 2646 + uint64_t i_addr:42; 2647 + uint64_t i_rsvd:15; 2555 2648 } ii_ibna0_fld_s; 2556 2649 } ii_ibna0_u_t; 2557 2650 2558 - 2559 2651 /************************************************************************ 2560 - * * 2652 + * * 2561 2653 * This register contains the programmable level as well as the node * 2562 2654 * ID and PI unit of the processor to which the interrupt will be * 2563 - * sent. * 2564 - * * 2655 + * sent. 
* 2656 + * * 2565 2657 ************************************************************************/ 2566 2658 2567 2659 typedef union ii_ibia0_u { 2568 - uint64_t ii_ibia0_regval; 2569 - struct { 2570 - uint64_t i_rsvd_2 : 1; 2571 - uint64_t i_node_id : 11; 2572 - uint64_t i_rsvd_1 : 4; 2573 - uint64_t i_level : 7; 2574 - uint64_t i_rsvd : 41; 2660 + uint64_t ii_ibia0_regval; 2661 + struct { 2662 + uint64_t i_rsvd_2:1; 2663 + uint64_t i_node_id:11; 2664 + uint64_t i_rsvd_1:4; 2665 + uint64_t i_level:7; 2666 + uint64_t i_rsvd:41; 2575 2667 } ii_ibia0_fld_s; 2576 2668 } ii_ibia0_u_t; 2577 2669 2578 - 2579 2670 /************************************************************************ 2580 - * * 2671 + * * 2581 2672 * Description: This register is used to set up the length for a * 2582 2673 * transfer and then to monitor the progress of that transfer. This * 2583 2674 * register needs to be initialized before a transfer is started. A * ··· 2585 2682 * transfer completes, hardware will clear the Busy bit. The length * 2586 2683 * field will also contain the number of cache lines left to be * 2587 2684 * transferred. * 2588 - * * 2685 + * * 2589 2686 ************************************************************************/ 2590 2687 2591 2688 typedef union ii_ibls1_u { 2592 - uint64_t ii_ibls1_regval; 2593 - struct { 2594 - uint64_t i_length : 16; 2595 - uint64_t i_error : 1; 2596 - uint64_t i_rsvd_1 : 3; 2597 - uint64_t i_busy : 1; 2598 - uint64_t i_rsvd : 43; 2689 + uint64_t ii_ibls1_regval; 2690 + struct { 2691 + uint64_t i_length:16; 2692 + uint64_t i_error:1; 2693 + uint64_t i_rsvd_1:3; 2694 + uint64_t i_busy:1; 2695 + uint64_t i_rsvd:43; 2599 2696 } ii_ibls1_fld_s; 2600 2697 } ii_ibls1_u_t; 2601 2698 2602 - 2603 2699 /************************************************************************ 2604 - * * 2700 + * * 2605 2701 * This register should be loaded before a transfer is started. 
The * 2606 2702 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2607 2703 * address as described in Section 1.3, Figure2 and Figure3. Since * 2608 2704 * the bottom 7 bits of the address are always taken to be zero, BTE * 2609 2705 * transfers are always cacheline-aligned. * 2610 - * * 2706 + * * 2611 2707 ************************************************************************/ 2612 2708 2613 2709 typedef union ii_ibsa1_u { 2614 - uint64_t ii_ibsa1_regval; 2615 - struct { 2616 - uint64_t i_rsvd_1 : 7; 2617 - uint64_t i_addr : 33; 2618 - uint64_t i_rsvd : 24; 2710 + uint64_t ii_ibsa1_regval; 2711 + struct { 2712 + uint64_t i_rsvd_1:7; 2713 + uint64_t i_addr:33; 2714 + uint64_t i_rsvd:24; 2619 2715 } ii_ibsa1_fld_s; 2620 2716 } ii_ibsa1_u_t; 2621 2717 2622 - 2623 2718 /************************************************************************ 2624 - * * 2719 + * * 2625 2720 * This register should be loaded before a transfer is started. The * 2626 2721 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2627 2722 * address as described in Section 1.3, Figure2 and Figure3. Since * 2628 2723 * the bottom 7 bits of the address are always taken to be zero, BTE * 2629 2724 * transfers are always cacheline-aligned. * 2630 - * * 2725 + * * 2631 2726 ************************************************************************/ 2632 2727 2633 2728 typedef union ii_ibda1_u { 2634 - uint64_t ii_ibda1_regval; 2635 - struct { 2636 - uint64_t i_rsvd_1 : 7; 2637 - uint64_t i_addr : 33; 2638 - uint64_t i_rsvd : 24; 2729 + uint64_t ii_ibda1_regval; 2730 + struct { 2731 + uint64_t i_rsvd_1:7; 2732 + uint64_t i_addr:33; 2733 + uint64_t i_rsvd:24; 2639 2734 } ii_ibda1_fld_s; 2640 2735 } ii_ibda1_u_t; 2641 2736 2642 - 2643 2737 /************************************************************************ 2644 - * * 2738 + * * 2645 2739 * Writing to this register sets up the attributes of the transfer * 2646 2740 * and initiates the transfer operation. 
Reading this register has * 2647 2741 * the side effect of terminating any transfer in progress. Note: * ··· 2646 2746 * other BTE. If a BTE stream has to be stopped (due to error * 2647 2747 * handling for example), both BTE streams should be stopped and * 2648 2748 * their transfers discarded. * 2649 - * * 2749 + * * 2650 2750 ************************************************************************/ 2651 2751 2652 2752 typedef union ii_ibct1_u { 2653 - uint64_t ii_ibct1_regval; 2654 - struct { 2655 - uint64_t i_zerofill : 1; 2656 - uint64_t i_rsvd_2 : 3; 2657 - uint64_t i_notify : 1; 2658 - uint64_t i_rsvd_1 : 3; 2659 - uint64_t i_poison : 1; 2660 - uint64_t i_rsvd : 55; 2753 + uint64_t ii_ibct1_regval; 2754 + struct { 2755 + uint64_t i_zerofill:1; 2756 + uint64_t i_rsvd_2:3; 2757 + uint64_t i_notify:1; 2758 + uint64_t i_rsvd_1:3; 2759 + uint64_t i_poison:1; 2760 + uint64_t i_rsvd:55; 2661 2761 } ii_ibct1_fld_s; 2662 2762 } ii_ibct1_u_t; 2663 2763 2664 - 2665 2764 /************************************************************************ 2666 - * * 2765 + * * 2667 2766 * This register contains the address to which the WINV is sent. * 2668 2767 * This address has to be cache line aligned. * 2669 - * * 2768 + * * 2670 2769 ************************************************************************/ 2671 2770 2672 2771 typedef union ii_ibna1_u { 2673 - uint64_t ii_ibna1_regval; 2674 - struct { 2675 - uint64_t i_rsvd_1 : 7; 2676 - uint64_t i_addr : 33; 2677 - uint64_t i_rsvd : 24; 2772 + uint64_t ii_ibna1_regval; 2773 + struct { 2774 + uint64_t i_rsvd_1:7; 2775 + uint64_t i_addr:33; 2776 + uint64_t i_rsvd:24; 2678 2777 } ii_ibna1_fld_s; 2679 2778 } ii_ibna1_u_t; 2680 2779 2681 - 2682 2780 /************************************************************************ 2683 - * * 2781 + * * 2684 2782 * This register contains the programmable level as well as the node * 2685 2783 * ID and PI unit of the processor to which the interrupt will be * 2686 - * sent. 
* 2687 - * * 2784 + * sent. * 2785 + * * 2688 2786 ************************************************************************/ 2689 2787 2690 2788 typedef union ii_ibia1_u { 2691 - uint64_t ii_ibia1_regval; 2692 - struct { 2693 - uint64_t i_pi_id : 1; 2694 - uint64_t i_node_id : 8; 2695 - uint64_t i_rsvd_1 : 7; 2696 - uint64_t i_level : 7; 2697 - uint64_t i_rsvd : 41; 2789 + uint64_t ii_ibia1_regval; 2790 + struct { 2791 + uint64_t i_pi_id:1; 2792 + uint64_t i_node_id:8; 2793 + uint64_t i_rsvd_1:7; 2794 + uint64_t i_level:7; 2795 + uint64_t i_rsvd:41; 2698 2796 } ii_ibia1_fld_s; 2699 2797 } ii_ibia1_u_t; 2700 2798 2701 - 2702 2799 /************************************************************************ 2703 - * * 2800 + * * 2704 2801 * This register defines the resources that feed information into * 2705 2802 * the two performance counters located in the IO Performance * 2706 2803 * Profiling Register. There are 17 different quantities that can be * ··· 2708 2811 * other is available from the other performance counter. Hence, the * 2709 2812 * II supports all 17*16=272 possible combinations of quantities to * 2710 2813 * measure. 
* 2711 - * * 2814 + * * 2712 2815 ************************************************************************/ 2713 2816 2714 2817 typedef union ii_ipcr_u { 2715 - uint64_t ii_ipcr_regval; 2716 - struct { 2717 - uint64_t i_ippr0_c : 4; 2718 - uint64_t i_ippr1_c : 4; 2719 - uint64_t i_icct : 8; 2720 - uint64_t i_rsvd : 48; 2818 + uint64_t ii_ipcr_regval; 2819 + struct { 2820 + uint64_t i_ippr0_c:4; 2821 + uint64_t i_ippr1_c:4; 2822 + uint64_t i_icct:8; 2823 + uint64_t i_rsvd:48; 2721 2824 } ii_ipcr_fld_s; 2722 2825 } ii_ipcr_u_t; 2723 2826 2724 - 2725 2827 /************************************************************************ 2726 - * * 2727 - * * 2728 - * * 2828 + * * 2829 + * * 2830 + * * 2729 2831 ************************************************************************/ 2730 2832 2731 2833 typedef union ii_ippr_u { 2732 - uint64_t ii_ippr_regval; 2733 - struct { 2734 - uint64_t i_ippr0 : 32; 2735 - uint64_t i_ippr1 : 32; 2834 + uint64_t ii_ippr_regval; 2835 + struct { 2836 + uint64_t i_ippr0:32; 2837 + uint64_t i_ippr1:32; 2736 2838 } ii_ippr_fld_s; 2737 2839 } ii_ippr_u_t; 2738 2840 2739 - 2740 - 2741 - /************************************************************************** 2742 - * * 2743 - * The following defines which were not formed into structures are * 2744 - * probably indentical to another register, and the name of the * 2745 - * register is provided against each of these registers. 
This * 2746 - * information needs to be checked carefully * 2747 - * * 2748 - * IIO_ICRB1_A IIO_ICRB0_A * 2749 - * IIO_ICRB1_B IIO_ICRB0_B * 2750 - * IIO_ICRB1_C IIO_ICRB0_C * 2751 - * IIO_ICRB1_D IIO_ICRB0_D * 2752 - * IIO_ICRB1_E IIO_ICRB0_E * 2753 - * IIO_ICRB2_A IIO_ICRB0_A * 2754 - * IIO_ICRB2_B IIO_ICRB0_B * 2755 - * IIO_ICRB2_C IIO_ICRB0_C * 2756 - * IIO_ICRB2_D IIO_ICRB0_D * 2757 - * IIO_ICRB2_E IIO_ICRB0_E * 2758 - * IIO_ICRB3_A IIO_ICRB0_A * 2759 - * IIO_ICRB3_B IIO_ICRB0_B * 2760 - * IIO_ICRB3_C IIO_ICRB0_C * 2761 - * IIO_ICRB3_D IIO_ICRB0_D * 2762 - * IIO_ICRB3_E IIO_ICRB0_E * 2763 - * IIO_ICRB4_A IIO_ICRB0_A * 2764 - * IIO_ICRB4_B IIO_ICRB0_B * 2765 - * IIO_ICRB4_C IIO_ICRB0_C * 2766 - * IIO_ICRB4_D IIO_ICRB0_D * 2767 - * IIO_ICRB4_E IIO_ICRB0_E * 2768 - * IIO_ICRB5_A IIO_ICRB0_A * 2769 - * IIO_ICRB5_B IIO_ICRB0_B * 2770 - * IIO_ICRB5_C IIO_ICRB0_C * 2771 - * IIO_ICRB5_D IIO_ICRB0_D * 2772 - * IIO_ICRB5_E IIO_ICRB0_E * 2773 - * IIO_ICRB6_A IIO_ICRB0_A * 2774 - * IIO_ICRB6_B IIO_ICRB0_B * 2775 - * IIO_ICRB6_C IIO_ICRB0_C * 2776 - * IIO_ICRB6_D IIO_ICRB0_D * 2777 - * IIO_ICRB6_E IIO_ICRB0_E * 2778 - * IIO_ICRB7_A IIO_ICRB0_A * 2779 - * IIO_ICRB7_B IIO_ICRB0_B * 2780 - * IIO_ICRB7_C IIO_ICRB0_C * 2781 - * IIO_ICRB7_D IIO_ICRB0_D * 2782 - * IIO_ICRB7_E IIO_ICRB0_E * 2783 - * IIO_ICRB8_A IIO_ICRB0_A * 2784 - * IIO_ICRB8_B IIO_ICRB0_B * 2785 - * IIO_ICRB8_C IIO_ICRB0_C * 2786 - * IIO_ICRB8_D IIO_ICRB0_D * 2787 - * IIO_ICRB8_E IIO_ICRB0_E * 2788 - * IIO_ICRB9_A IIO_ICRB0_A * 2789 - * IIO_ICRB9_B IIO_ICRB0_B * 2790 - * IIO_ICRB9_C IIO_ICRB0_C * 2791 - * IIO_ICRB9_D IIO_ICRB0_D * 2792 - * IIO_ICRB9_E IIO_ICRB0_E * 2793 - * IIO_ICRBA_A IIO_ICRB0_A * 2794 - * IIO_ICRBA_B IIO_ICRB0_B * 2795 - * IIO_ICRBA_C IIO_ICRB0_C * 2796 - * IIO_ICRBA_D IIO_ICRB0_D * 2797 - * IIO_ICRBA_E IIO_ICRB0_E * 2798 - * IIO_ICRBB_A IIO_ICRB0_A * 2799 - * IIO_ICRBB_B IIO_ICRB0_B * 2800 - * IIO_ICRBB_C IIO_ICRB0_C * 2801 - * IIO_ICRBB_D IIO_ICRB0_D * 2802 - * IIO_ICRBB_E IIO_ICRB0_E * 
2803 - * IIO_ICRBC_A IIO_ICRB0_A * 2804 - * IIO_ICRBC_B IIO_ICRB0_B * 2805 - * IIO_ICRBC_C IIO_ICRB0_C * 2806 - * IIO_ICRBC_D IIO_ICRB0_D * 2807 - * IIO_ICRBC_E IIO_ICRB0_E * 2808 - * IIO_ICRBD_A IIO_ICRB0_A * 2809 - * IIO_ICRBD_B IIO_ICRB0_B * 2810 - * IIO_ICRBD_C IIO_ICRB0_C * 2811 - * IIO_ICRBD_D IIO_ICRB0_D * 2812 - * IIO_ICRBD_E IIO_ICRB0_E * 2813 - * IIO_ICRBE_A IIO_ICRB0_A * 2814 - * IIO_ICRBE_B IIO_ICRB0_B * 2815 - * IIO_ICRBE_C IIO_ICRB0_C * 2816 - * IIO_ICRBE_D IIO_ICRB0_D * 2817 - * IIO_ICRBE_E IIO_ICRB0_E * 2818 - * * 2819 - **************************************************************************/ 2820 - 2841 + /************************************************************************ 2842 + * * 2843 + * The following defines which were not formed into structures are * 2844 + * probably indentical to another register, and the name of the * 2845 + * register is provided against each of these registers. This * 2846 + * information needs to be checked carefully * 2847 + * * 2848 + * IIO_ICRB1_A IIO_ICRB0_A * 2849 + * IIO_ICRB1_B IIO_ICRB0_B * 2850 + * IIO_ICRB1_C IIO_ICRB0_C * 2851 + * IIO_ICRB1_D IIO_ICRB0_D * 2852 + * IIO_ICRB1_E IIO_ICRB0_E * 2853 + * IIO_ICRB2_A IIO_ICRB0_A * 2854 + * IIO_ICRB2_B IIO_ICRB0_B * 2855 + * IIO_ICRB2_C IIO_ICRB0_C * 2856 + * IIO_ICRB2_D IIO_ICRB0_D * 2857 + * IIO_ICRB2_E IIO_ICRB0_E * 2858 + * IIO_ICRB3_A IIO_ICRB0_A * 2859 + * IIO_ICRB3_B IIO_ICRB0_B * 2860 + * IIO_ICRB3_C IIO_ICRB0_C * 2861 + * IIO_ICRB3_D IIO_ICRB0_D * 2862 + * IIO_ICRB3_E IIO_ICRB0_E * 2863 + * IIO_ICRB4_A IIO_ICRB0_A * 2864 + * IIO_ICRB4_B IIO_ICRB0_B * 2865 + * IIO_ICRB4_C IIO_ICRB0_C * 2866 + * IIO_ICRB4_D IIO_ICRB0_D * 2867 + * IIO_ICRB4_E IIO_ICRB0_E * 2868 + * IIO_ICRB5_A IIO_ICRB0_A * 2869 + * IIO_ICRB5_B IIO_ICRB0_B * 2870 + * IIO_ICRB5_C IIO_ICRB0_C * 2871 + * IIO_ICRB5_D IIO_ICRB0_D * 2872 + * IIO_ICRB5_E IIO_ICRB0_E * 2873 + * IIO_ICRB6_A IIO_ICRB0_A * 2874 + * IIO_ICRB6_B IIO_ICRB0_B * 2875 + * IIO_ICRB6_C IIO_ICRB0_C * 2876 + * IIO_ICRB6_D 
IIO_ICRB0_D * 2877 + * IIO_ICRB6_E IIO_ICRB0_E * 2878 + * IIO_ICRB7_A IIO_ICRB0_A * 2879 + * IIO_ICRB7_B IIO_ICRB0_B * 2880 + * IIO_ICRB7_C IIO_ICRB0_C * 2881 + * IIO_ICRB7_D IIO_ICRB0_D * 2882 + * IIO_ICRB7_E IIO_ICRB0_E * 2883 + * IIO_ICRB8_A IIO_ICRB0_A * 2884 + * IIO_ICRB8_B IIO_ICRB0_B * 2885 + * IIO_ICRB8_C IIO_ICRB0_C * 2886 + * IIO_ICRB8_D IIO_ICRB0_D * 2887 + * IIO_ICRB8_E IIO_ICRB0_E * 2888 + * IIO_ICRB9_A IIO_ICRB0_A * 2889 + * IIO_ICRB9_B IIO_ICRB0_B * 2890 + * IIO_ICRB9_C IIO_ICRB0_C * 2891 + * IIO_ICRB9_D IIO_ICRB0_D * 2892 + * IIO_ICRB9_E IIO_ICRB0_E * 2893 + * IIO_ICRBA_A IIO_ICRB0_A * 2894 + * IIO_ICRBA_B IIO_ICRB0_B * 2895 + * IIO_ICRBA_C IIO_ICRB0_C * 2896 + * IIO_ICRBA_D IIO_ICRB0_D * 2897 + * IIO_ICRBA_E IIO_ICRB0_E * 2898 + * IIO_ICRBB_A IIO_ICRB0_A * 2899 + * IIO_ICRBB_B IIO_ICRB0_B * 2900 + * IIO_ICRBB_C IIO_ICRB0_C * 2901 + * IIO_ICRBB_D IIO_ICRB0_D * 2902 + * IIO_ICRBB_E IIO_ICRB0_E * 2903 + * IIO_ICRBC_A IIO_ICRB0_A * 2904 + * IIO_ICRBC_B IIO_ICRB0_B * 2905 + * IIO_ICRBC_C IIO_ICRB0_C * 2906 + * IIO_ICRBC_D IIO_ICRB0_D * 2907 + * IIO_ICRBC_E IIO_ICRB0_E * 2908 + * IIO_ICRBD_A IIO_ICRB0_A * 2909 + * IIO_ICRBD_B IIO_ICRB0_B * 2910 + * IIO_ICRBD_C IIO_ICRB0_C * 2911 + * IIO_ICRBD_D IIO_ICRB0_D * 2912 + * IIO_ICRBD_E IIO_ICRB0_E * 2913 + * IIO_ICRBE_A IIO_ICRB0_A * 2914 + * IIO_ICRBE_B IIO_ICRB0_B * 2915 + * IIO_ICRBE_C IIO_ICRB0_C * 2916 + * IIO_ICRBE_D IIO_ICRB0_D * 2917 + * IIO_ICRBE_E IIO_ICRB0_E * 2918 + * * 2919 + ************************************************************************/ 2821 2920 2822 2921 /* 2823 2922 * Slightly friendlier names for some common registers. 
2824 2923 */ 2825 - #define IIO_WIDGET IIO_WID /* Widget identification */ 2826 - #define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ 2827 - #define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ 2828 - #define IIO_PROTECT IIO_ILAPR /* IO interface protection */ 2829 - #define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ 2830 - #define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ 2831 - #define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ 2832 - #define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ 2833 - #define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ 2834 - #define IIO_LLP_LOG IIO_ILLR /* LLP log */ 2835 - #define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/ 2836 - #define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ 2837 - #define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ 2924 + #define IIO_WIDGET IIO_WID /* Widget identification */ 2925 + #define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ 2926 + #define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ 2927 + #define IIO_PROTECT IIO_ILAPR /* IO interface protection */ 2928 + #define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ 2929 + #define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ 2930 + #define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ 2931 + #define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ 2932 + #define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ 2933 + #define IIO_LLP_LOG IIO_ILLR /* LLP log */ 2934 + #define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */ 2935 + #define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ 2936 + #define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ 2838 2937 #define IIO_IGFX_0 IIO_IGFX0 2839 2938 #define IIO_IGFX_1 IIO_IGFX1 2840 2939 #define IIO_IBCT_0 IIO_IBCT0 ··· 2850 2957 #define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x))) 2851 2958 #define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * 
(_x))) 2852 2959 #define IIO_NUM_PRTES 8 /* Total number of PRB table entries */ 2853 - #define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ 2854 - #define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ 2960 + #define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ 2961 + #define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ 2855 2962 2856 - #define IIO_NUM_IPRBS (9) 2963 + #define IIO_NUM_IPRBS 9 2857 2964 2858 - #define IIO_LLP_CSR_IS_UP 0x00002000 2965 + #define IIO_LLP_CSR_IS_UP 0x00002000 2859 2966 #define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000 2860 2967 #define IIO_LLP_CSR_LLP_STAT_SHFT 12 2861 2968 ··· 2863 2970 #define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */ 2864 2971 2865 2972 /* key to IIO_PROTECT_OVRRD */ 2866 - #define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ 2973 + #define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ 2867 2974 2868 2975 /* BTE register names */ 2869 - #define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ 2870 - #define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ 2871 - #define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */ 2872 - #define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ 2873 - #define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ 2874 - #define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ 2875 - #define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ 2876 - #define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ 2976 + #define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ 2977 + #define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ 2978 + #define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. 
address 0 */ 2979 + #define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ 2980 + #define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ 2981 + #define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ 2982 + #define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ 2983 + #define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ 2877 2984 2878 2985 /* BTE register offsets from base */ 2879 2986 #define BTEOFF_STAT 0 2880 - #define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) 2881 - #define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) 2882 - #define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) 2883 - #define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) 2884 - #define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) 2885 - 2987 + #define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) 2988 + #define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) 2989 + #define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) 2990 + #define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) 2991 + #define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) 2886 2992 2887 2993 /* names used in shub diags */ 2888 - #define IIO_BASE_BTE0 IIO_IBLS_0 2889 - #define IIO_BASE_BTE1 IIO_IBLS_1 2994 + #define IIO_BASE_BTE0 IIO_IBLS_0 2995 + #define IIO_BASE_BTE1 IIO_IBLS_1 2890 2996 2891 2997 /* 2892 2998 * Macro which takes the widget number, and returns the ··· 2893 3001 * value _x is expected to be a widget number in the range 2894 3002 * 0, 8 - 0xF 2895 3003 */ 2896 - #define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \ 2897 - (_x) : \ 2898 - (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) 2899 - 3004 + #define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? 
\ 3005 + (_x) : \ 3006 + (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) 2900 3007 2901 3008 /* GFX Flow Control Node/Widget Register */ 2902 3009 #define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */ ··· 2915 3024 (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \ 2916 3025 (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \ 2917 3026 (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT)) 2918 - 2919 3027 2920 3028 /* Scratch registers (all bits available) */ 2921 3029 #define IIO_SCRATCH_REG0 IIO_ISCR0 ··· 2936 3046 #define IIO_SCRATCH_BIT1_0 0x0000000000000001UL 2937 3047 #define IIO_SCRATCH_BIT1_1 0x0000000000000002UL 2938 3048 /* IO Translation Table Entries */ 2939 - #define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ 2940 - /* Hw manuals number them 1..7! */ 3049 + #define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ 3050 + /* Hw manuals number them 1..7! */ 2941 3051 /* 2942 3052 * IIO_IMEM Register fields. 2943 3053 */ 2944 - #define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ 2945 - #define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ 2946 - #define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ 3054 + #define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ 3055 + #define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ 3056 + #define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ 2947 3057 2948 3058 /* 2949 3059 * As a permanent workaround for a bug in the PI side of the shub, we've 2950 3060 * redefined big window 7 as small window 0. 2951 3061 XXX does this still apply for SN1?? 
2952 3062 */ 2953 - #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 3063 + #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 2954 3064 2955 3065 /* 2956 3066 * Use the top big window as a surrogate for the first small window ··· 2961 3071 2962 3072 /* 2963 3073 * CRB manipulation macros 2964 - * The CRB macros are slightly complicated, since there are up to 2965 - * four registers associated with each CRB entry. 3074 + * The CRB macros are slightly complicated, since there are up to 3075 + * four registers associated with each CRB entry. 2966 3076 */ 2967 - #define IIO_NUM_CRBS 15 /* Number of CRBs */ 2968 - #define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ 3077 + #define IIO_NUM_CRBS 15 /* Number of CRBs */ 3078 + #define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ 2969 3079 #define IIO_ICRB_OFFSET 8 2970 3080 #define IIO_ICRB_0 IIO_ICRB0_A 2971 3081 #define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */ ··· 2973 3083 #define IIO_FIRST_PC_ENTRY 12 2974 3084 */ 2975 3085 2976 - #define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) 2977 - #define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) 2978 - #define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) 2979 - #define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) 2980 - #define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) 3086 + #define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) 3087 + #define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) 3088 + #define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) 3089 + #define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) 3090 + #define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) 2981 3091 2982 3092 #define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7) 2983 3093 2984 3094 /* 2985 3095 * values for "ecode" field 2986 3096 */ 2987 - #define 
IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ 2988 - #define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ 2989 - #define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access 2990 - * e.g. WINV to a Read only line. */ 2991 - #define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ 2992 - #define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ 2993 - #define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ 2994 - #define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ 2995 - #define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ 3097 + #define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ 3098 + #define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ 3099 + #define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access 3100 + * e.g. WINV to a Read only line. */ 3101 + #define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ 3102 + #define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ 3103 + #define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ 3104 + #define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ 3105 + #define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ 2996 3106 2997 3107 /* 2998 3108 * Values for field imsgtype 2999 3109 */ 3000 - #define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ 3001 - #define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 3002 - #define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ 3003 - #define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 3110 + #define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ 3111 + #define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 3112 + #define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ 3113 + #define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 3004 3114 3005 3115 /* 3006 3116 * values for field initiator. 
3007 3117 */ 3008 - #define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ 3009 - #define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ 3010 - #define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ 3011 - #define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */ 3012 - #define IIO_ICRB_INIT_BTE1 0x5 /* MEssage originated in BTE 1 */ 3118 + #define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ 3119 + #define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ 3120 + #define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ 3121 + #define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */ 3122 + #define IIO_ICRB_INIT_BTE1 0x5 /* MEssage originated in BTE 1 */ 3013 3123 3014 3124 /* 3015 3125 * Number of credits Hub widget has while sending req/response to ··· 3017 3127 * Value of 3 is required by Xbow 1.1 3018 3128 * We may be able to increase this to 4 with Xbow 1.2. 3019 3129 */ 3020 - #define HUBII_XBOW_CREDIT 3 3021 - #define HUBII_XBOW_REV2_CREDIT 4 3130 + #define HUBII_XBOW_CREDIT 3 3131 + #define HUBII_XBOW_REV2_CREDIT 4 3022 3132 3023 3133 /* 3024 3134 * Number of credits that xtalk devices should use when communicating ··· 3049 3159 */ 3050 3160 3051 3161 #define IIO_ICMR_CRB_VLD_SHFT 20 3052 - #define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) 3162 + #define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) 3053 3163 3054 3164 #define IIO_ICMR_FC_CNT_SHFT 16 3055 - #define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) 3165 + #define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) 3056 3166 3057 3167 #define IIO_ICMR_C_CNT_SHFT 4 3058 - #define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) 3168 + #define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) 3059 3169 3060 - #define IIO_ICMR_PRECISE (1UL << 52) 3061 - #define IIO_ICMR_CLR_RPPD (1UL << 13) 3062 - #define IIO_ICMR_CLR_RQPD (1UL << 12) 3170 + #define IIO_ICMR_PRECISE (1UL << 52) 3171 + #define 
IIO_ICMR_CLR_RPPD (1UL << 13) 3172 + #define IIO_ICMR_CLR_RQPD (1UL << 12) 3063 3173 3064 3174 /* 3065 3175 * IIO PIO Deallocation register field masks : (IIO_IPDR) 3066 3176 XXX present but not needed in bedrock? See the manual. 3067 3177 */ 3068 - #define IIO_IPDR_PND (1 << 4) 3178 + #define IIO_IPDR_PND (1 << 4) 3069 3179 3070 3180 /* 3071 3181 * IIO CRB deallocation register field masks: (IIO_ICDR) 3072 3182 */ 3073 - #define IIO_ICDR_PND (1 << 4) 3183 + #define IIO_ICDR_PND (1 << 4) 3074 3184 3075 3185 /* 3076 3186 * IO BTE Length/Status (IIO_IBLS) register bit field definitions ··· 3113 3223 /* 3114 3224 * IO Error Clear register bit field definitions 3115 3225 */ 3116 - #define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ 3117 - #define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ 3118 - #define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ 3119 - #define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ 3120 - #define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ 3121 - #define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ 3122 - #define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ 3123 - #define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ 3124 - #define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ 3125 - #define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ 3126 - #define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ 3127 - #define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ 3128 - #define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ 3129 - #define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ 3130 - #define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ 3226 + #define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ 3227 + #define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ 3228 + #define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in 
ixss reg */ 3229 + #define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ 3230 + #define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ 3231 + #define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ 3232 + #define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ 3233 + #define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ 3234 + #define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ 3235 + #define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ 3236 + #define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ 3237 + #define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ 3238 + #define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ 3239 + #define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ 3240 + #define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ 3131 3241 3132 3242 /* 3133 3243 * IIO CRB control register Fields: IIO_ICCR 3134 3244 */ 3135 - #define IIO_ICCR_PENDING (0x10000) 3136 - #define IIO_ICCR_CMD_MASK (0xFF) 3137 - #define IIO_ICCR_CMD_SHFT (7) 3138 - #define IIO_ICCR_CMD_NOP (0x0) /* No Op */ 3139 - #define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */ 3140 - #define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */ 3141 - #define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory 3245 + #define IIO_ICCR_PENDING 0x10000 3246 + #define IIO_ICCR_CMD_MASK 0xFF 3247 + #define IIO_ICCR_CMD_SHFT 7 3248 + #define IIO_ICCR_CMD_NOP 0x0 /* No Op */ 3249 + #define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */ 3250 + #define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */ 3251 + #define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory 3142 3252 * via a WB 3143 3253 */ 3144 - #define IIO_ICCR_CMD_FLUSH (0x800) 3254 + #define IIO_ICCR_CMD_FLUSH 0x800 3145 3255 3146 3256 /* 3147 3257 * ··· 3173 3283 * Easy access macros for CRBs, all 5 registers (A-E) 3174 3284 */ 3175 3285 
typedef ii_icrb0_a_u_t icrba_t; 3176 - #define a_sidn ii_icrb0_a_fld_s.ia_sidn 3177 - #define a_tnum ii_icrb0_a_fld_s.ia_tnum 3286 + #define a_sidn ii_icrb0_a_fld_s.ia_sidn 3287 + #define a_tnum ii_icrb0_a_fld_s.ia_tnum 3178 3288 #define a_addr ii_icrb0_a_fld_s.ia_addr 3179 3289 #define a_valid ii_icrb0_a_fld_s.ia_vld 3180 3290 #define a_iow ii_icrb0_a_fld_s.ia_iow ··· 3214 3324 #define c_source ii_icrb0_c_fld_s.ic_source 3215 3325 #define c_regvalue ii_icrb0_c_regval 3216 3326 3217 - 3218 3327 typedef ii_icrb0_d_u_t icrbd_t; 3219 3328 #define d_sleep ii_icrb0_d_fld_s.id_sleep 3220 3329 #define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt 3221 3330 #define d_pripsc ii_icrb0_d_fld_s.id_pr_psc 3222 3331 #define d_bteop ii_icrb0_d_fld_s.id_bte_op 3223 - #define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ 3224 - #define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ 3332 + #define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */ 3333 + #define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */ 3225 3334 #define d_regvalue ii_icrb0_d_regval 3226 3335 3227 3336 typedef ii_icrb0_e_u_t icrbe_t; ··· 3229 3340 #define icrbe_context ii_icrb0_e_fld_s.ie_context 3230 3341 #define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout 3231 3342 #define e_regvalue ii_icrb0_e_regval 3232 - 3233 3343 3234 3344 /* Number of widgets supported by shub */ 3235 3345 #define HUB_NUM_WIDGET 9 ··· 3255 3367 3256 3368 #define LNK_STAT_WORKING 0x2 /* LLP is working */ 3257 3369 3258 - #define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ 3259 - #define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ 3260 - #define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? 
*/ 3261 - #define IIO_WSTAT_TXRETRY_SHFT (16) 3262 - #define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ 3263 - IIO_WSTAT_TXRETRY_MASK) 3370 + #define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ 3371 + #define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ 3372 + #define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */ 3373 + #define IIO_WSTAT_TXRETRY_SHFT 16 3374 + #define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ 3375 + IIO_WSTAT_TXRETRY_MASK) 3264 3376 3265 3377 /* Number of II perf. counters we can multiplex at once */ 3266 3378 3267 3379 #define IO_PERF_SETS 32 3268 3380 3269 3381 /* Bit for the widget in inbound access register */ 3270 - #define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3382 + #define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3271 3383 /* Bit for the widget in outbound access register */ 3272 - #define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3384 + #define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3273 3385 3274 3386 /* NOTE: The following define assumes that we are going to get 3275 3387 * widget numbers from 8 thru F and the device numbers within 3276 3388 * widget from 0 thru 7. 
3277 3389 */ 3278 - #define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d)))) 3390 + #define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d)))) 3279 3391 3280 3392 /* IO Interrupt Destination Register */ 3281 3393 #define IIO_IIDSR_SENT_SHIFT 28 ··· 3290 3402 #define IIO_IIDSR_LVL_MASK 0x000000ff 3291 3403 3292 3404 /* Xtalk timeout threshhold register (IIO_IXTT) */ 3293 - #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ 3405 + #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ 3294 3406 #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) 3295 - #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ 3407 + #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ 3296 3408 #define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT) 3297 - #define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ 3409 + #define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ 3298 3410 #define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT) 3299 3411 3300 3412 /* ··· 3302 3414 */ 3303 3415 3304 3416 typedef union hubii_wcr_u { 3305 - uint64_t wcr_reg_value; 3306 - struct { 3307 - uint64_t wcr_widget_id: 4, /* LLP crossbar credit */ 3308 - wcr_tag_mode: 1, /* Tag mode */ 3309 - wcr_rsvd1: 8, /* Reserved */ 3310 - wcr_xbar_crd: 3, /* LLP crossbar credit */ 3311 - wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */ 3312 - wcr_dir_con: 1, /* widget direct connect */ 3313 - wcr_e_thresh: 5, /* elasticity threshold */ 3314 - wcr_rsvd: 41; /* unused */ 3315 - } wcr_fields_s; 3417 + uint64_t wcr_reg_value; 3418 + struct { 3419 + uint64_t wcr_widget_id:4, /* LLP crossbar credit */ 3420 + wcr_tag_mode:1, /* Tag mode */ 3421 + wcr_rsvd1:8, /* Reserved */ 3422 + wcr_xbar_crd:3, /* LLP crossbar credit */ 3423 + wcr_f_bad_pkt:1, /* Force bad llp pkt enable */ 3424 + wcr_dir_con:1, /* widget direct connect */ 3425 + wcr_e_thresh:5, /* elasticity threshold */ 3426 + wcr_rsvd:41; /* unused */ 3427 + } 
wcr_fields_s; 3316 3428 } hubii_wcr_t; 3317 3429 3318 3430 #define iwcr_dir_con wcr_fields_s.wcr_dir_con ··· 3324 3436 performed */ 3325 3437 3326 3438 typedef union io_perf_sel { 3327 - uint64_t perf_sel_reg; 3328 - struct { 3329 - uint64_t perf_ippr0 : 4, 3330 - perf_ippr1 : 4, 3331 - perf_icct : 8, 3332 - perf_rsvd : 48; 3333 - } perf_sel_bits; 3439 + uint64_t perf_sel_reg; 3440 + struct { 3441 + uint64_t perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48; 3442 + } perf_sel_bits; 3334 3443 } io_perf_sel_t; 3335 3444 3336 3445 /* io_perf_cnt is to extract the count from the shub registers. Due to 3337 3446 hardware problems there is only one counter, not two. */ 3338 3447 3339 3448 typedef union io_perf_cnt { 3340 - uint64_t perf_cnt; 3341 - struct { 3342 - uint64_t perf_cnt : 20, 3343 - perf_rsvd2 : 12, 3344 - perf_rsvd1 : 32; 3345 - } perf_cnt_bits; 3449 + uint64_t perf_cnt; 3450 + struct { 3451 + uint64_t perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32; 3452 + } perf_cnt_bits; 3346 3453 3347 3454 } io_perf_cnt_t; 3348 3455 3349 3456 typedef union iprte_a { 3350 - uint64_t entry; 3457 + uint64_t entry; 3351 3458 struct { 3352 - uint64_t i_rsvd_1 : 3; 3353 - uint64_t i_addr : 38; 3354 - uint64_t i_init : 3; 3355 - uint64_t i_source : 8; 3356 - uint64_t i_rsvd : 2; 3357 - uint64_t i_widget : 4; 3358 - uint64_t i_to_cnt : 5; 3359 - uint64_t i_vld : 1; 3459 + uint64_t i_rsvd_1:3; 3460 + uint64_t i_addr:38; 3461 + uint64_t i_init:3; 3462 + uint64_t i_source:8; 3463 + uint64_t i_rsvd:2; 3464 + uint64_t i_widget:4; 3465 + uint64_t i_to_cnt:5; 3466 + uint64_t i_vld:1; 3360 3467 } iprte_fields; 3361 3468 } iprte_a_t; 3362 3469 3363 - #endif /* _ASM_IA64_SN_SHUBIO_H */ 3364 - 3470 + #endif /* _ASM_IA64_SN_SHUBIO_H */
+11 -14
include/asm-ia64/sn/sn_cpuid.h
··· 4 4 * License. See the file "COPYING" in the main directory of this archive 5 5 * for more details. 6 6 * 7 - * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. 7 + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. 8 8 */ 9 9 10 10 ··· 92 92 * NOTE: on non-MP systems, only cpuid 0 exists 93 93 */ 94 94 95 - extern short physical_node_map[]; /* indexed by nasid to get cnode */ 95 + extern short physical_node_map[]; /* indexed by nasid to get cnode */ 96 96 97 97 /* 98 98 * Macros for retrieving info about current cpu 99 99 */ 100 - #define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid) 101 - #define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode) 102 - #define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice) 103 - #define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode) 104 - #define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) 100 + #define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid) 101 + #define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode) 102 + #define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice) 103 + #define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode) 104 + #define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) 105 105 106 106 /* 107 107 * Macros for retrieving info about an arbitrary cpu 108 108 * cpuid - logical cpu id 109 109 */ 110 - #define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid) 111 - #define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode) 112 - #define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice) 110 + #define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid) 111 + #define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode) 112 + #define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice) 113 113 #define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)]) 114 114 
115 115 ··· 123 123 124 124 /* 125 125 * cnodeid_to_nasid - convert a cnodeid to a NASID 126 - * Macro relies on pg_data for a node being on the node itself. 127 - * Just extract the NASID from the pointer. 128 - * 129 126 */ 130 - #define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid] 127 + #define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid]) 131 128 132 129 /* 133 130 * nasid_to_cnodeid - convert a NASID to a cnodeid
-44
include/asm-ia64/sn/sn_fru.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved. 7 - */ 8 - #ifndef _ASM_IA64_SN_SN_FRU_H 9 - #define _ASM_IA64_SN_SN_FRU_H 10 - 11 - #define MAX_DIMMS 8 /* max # of dimm banks */ 12 - #define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */ 13 - 14 - typedef unsigned char confidence_t; 15 - 16 - typedef struct kf_mem_s { 17 - confidence_t km_confidence; /* confidence level that the memory is bad 18 - * is this necessary ? 19 - */ 20 - confidence_t km_dimm[MAX_DIMMS]; 21 - /* confidence level that dimm[i] is bad 22 - *I think this is the right number 23 - */ 24 - 25 - } kf_mem_t; 26 - 27 - typedef struct kf_cpu_s { 28 - confidence_t kc_confidence; /* confidence level that cpu is bad */ 29 - confidence_t kc_icache; /* confidence level that instr. cache is bad */ 30 - confidence_t kc_dcache; /* confidence level that data cache is bad */ 31 - confidence_t kc_scache; /* confidence level that sec. cache is bad */ 32 - confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */ 33 - } kf_cpu_t; 34 - 35 - 36 - typedef struct kf_pci_bus_s { 37 - confidence_t kpb_belief; /* confidence level that the pci bus is bad */ 38 - confidence_t kpb_pcidev_belief[MAX_PCIDEV]; 39 - /* confidence level that the pci dev is bad */ 40 - } kf_pci_bus_t; 41 - 42 - 43 - #endif /* _ASM_IA64_SN_SN_FRU_H */ 44 -
+46 -19
include/asm-ia64/sn/sn_sal.h
··· 557 557 ia64_sn_partition_serial_get(void) 558 558 { 559 559 struct ia64_sal_retval ret_stuff; 560 - SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0); 560 + ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 561 + 0, 0, 0, 0, 0, 0); 561 562 if (ret_stuff.status != 0) 562 563 return 0; 563 564 return ret_stuff.v0; ··· 566 565 567 566 static inline u64 568 567 sn_partition_serial_number_val(void) { 569 - if (sn_partition_serial_number) { 570 - return(sn_partition_serial_number); 571 - } else { 572 - return(sn_partition_serial_number = ia64_sn_partition_serial_get()); 568 + if (unlikely(sn_partition_serial_number == 0)) { 569 + sn_partition_serial_number = ia64_sn_partition_serial_get(); 573 570 } 571 + return sn_partition_serial_number; 574 572 } 575 573 576 574 /* ··· 580 580 ia64_sn_sysctl_partition_get(nasid_t nasid) 581 581 { 582 582 struct ia64_sal_retval ret_stuff; 583 - SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, 584 - 0, 0, 0, 0, 0, 0); 583 + ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, 584 + 0, 0, 0, 0, 0, 0); 585 585 if (ret_stuff.status != 0) 586 586 return INVALID_PARTID; 587 587 return ((partid_t)ret_stuff.v0); ··· 595 595 596 596 static inline partid_t 597 597 sn_local_partid(void) { 598 - if (sn_partid < 0) { 599 - return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()))); 600 - } else { 601 - return sn_partid; 598 + if (unlikely(sn_partid < 0)) { 599 + sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())); 602 600 } 601 + return sn_partid; 602 + } 603 + 604 + /* 605 + * Returns the physical address of the partition's reserved page through 606 + * an iterative number of calls. 607 + * 608 + * On first call, 'cookie' and 'len' should be set to 0, and 'addr' 609 + * set to the nasid of the partition whose reserved page's address is 610 + * being sought. 
611 + * On subsequent calls, pass the values, that were passed back on the 612 + * previous call. 613 + * 614 + * While the return status equals SALRET_MORE_PASSES, keep calling 615 + * this function after first copying 'len' bytes starting at 'addr' 616 + * into 'buf'. Once the return status equals SALRET_OK, 'addr' will 617 + * be the physical address of the partition's reserved page. If the 618 + * return status equals neither of these, an error as occurred. 619 + */ 620 + static inline s64 621 + sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len) 622 + { 623 + struct ia64_sal_retval rv; 624 + ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie, 625 + *addr, buf, *len, 0, 0, 0); 626 + *cookie = rv.v0; 627 + *addr = rv.v1; 628 + *len = rv.v2; 629 + return rv.status; 603 630 } 604 631 605 632 /* ··· 648 621 sn_register_xp_addr_region(u64 paddr, u64 len, int operation) 649 622 { 650 623 struct ia64_sal_retval ret_stuff; 651 - SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation, 652 - 0, 0, 0, 0); 624 + ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, 625 + (u64)operation, 0, 0, 0, 0); 653 626 return ret_stuff.status; 654 627 } 655 628 ··· 673 646 } else { 674 647 call = SN_SAL_NO_FAULT_ZONE_PHYSICAL; 675 648 } 676 - SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1, 677 - 0, 0, 0); 649 + ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr, 650 + (u64)1, 0, 0, 0); 678 651 return ret_stuff.status; 679 652 } 680 653 ··· 695 668 sn_change_coherence(u64 *new_domain, u64 *old_domain) 696 669 { 697 670 struct ia64_sal_retval ret_stuff; 698 - SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0, 699 - 0, 0, 0); 671 + ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain, 672 + (u64)old_domain, 0, 0, 0, 0, 0); 700 673 return ret_stuff.status; 701 674 } 702 675 ··· 715 688 cnodeid = nasid_to_cnodeid(get_node_number(paddr)); 716 689 // 
spin_lock(&NODEPDA(cnodeid)->bist_lock); 717 690 local_irq_save(irq_flags); 718 - SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array, 719 - perms, 0, 0, 0); 691 + ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len, 692 + (u64)nasid_array, perms, 0, 0, 0); 720 693 local_irq_restore(irq_flags); 721 694 // spin_unlock(&NODEPDA(cnodeid)->bist_lock); 722 695 return ret_stuff.status;
-47
include/asm-ia64/sn/sndrv.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved. 7 - */ 8 - 9 - #ifndef _ASM_IA64_SN_SNDRV_H 10 - #define _ASM_IA64_SN_SNDRV_H 11 - 12 - /* ioctl commands */ 13 - #define SNDRV_GET_ROUTERINFO 1 14 - #define SNDRV_GET_INFOSIZE 2 15 - #define SNDRV_GET_HUBINFO 3 16 - #define SNDRV_GET_FLASHLOGSIZE 4 17 - #define SNDRV_SET_FLASHSYNC 5 18 - #define SNDRV_GET_FLASHLOGDATA 6 19 - #define SNDRV_GET_FLASHLOGALL 7 20 - 21 - #define SNDRV_SET_HISTOGRAM_TYPE 14 22 - 23 - #define SNDRV_ELSC_COMMAND 19 24 - #define SNDRV_CLEAR_LOG 20 25 - #define SNDRV_INIT_LOG 21 26 - #define SNDRV_GET_PIMM_PSC 22 27 - #define SNDRV_SET_PARTITION 23 28 - #define SNDRV_GET_PARTITION 24 29 - 30 - /* see synergy_perf_ioctl() */ 31 - #define SNDRV_GET_SYNERGY_VERSION 30 32 - #define SNDRV_GET_SYNERGY_STATUS 31 33 - #define SNDRV_GET_SYNERGYINFO 32 34 - #define SNDRV_SYNERGY_APPEND 33 35 - #define SNDRV_SYNERGY_ENABLE 34 36 - #define SNDRV_SYNERGY_FREQ 35 37 - 38 - /* Devices */ 39 - #define SNDRV_UKNOWN_DEVICE -1 40 - #define SNDRV_ROUTER_DEVICE 1 41 - #define SNDRV_HUB_DEVICE 2 42 - #define SNDRV_ELSC_NVRAM_DEVICE 3 43 - #define SNDRV_ELSC_CONTROLLER_DEVICE 4 44 - #define SNDRV_SYSCTL_SUBCH 5 45 - #define SNDRV_SYNERGY_DEVICE 6 46 - 47 - #endif /* _ASM_IA64_SN_SNDRV_H */
+436
include/asm-ia64/sn/xp.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved. 7 + */ 8 + 9 + 10 + /* 11 + * External Cross Partition (XP) structures and defines. 12 + */ 13 + 14 + 15 + #ifndef _ASM_IA64_SN_XP_H 16 + #define _ASM_IA64_SN_XP_H 17 + 18 + 19 + #include <linux/version.h> 20 + #include <linux/cache.h> 21 + #include <linux/hardirq.h> 22 + #include <asm/sn/types.h> 23 + #include <asm/sn/bte.h> 24 + 25 + 26 + #ifdef USE_DBUG_ON 27 + #define DBUG_ON(condition) BUG_ON(condition) 28 + #else 29 + #define DBUG_ON(condition) 30 + #endif 31 + 32 + 33 + /* 34 + * Define the maximum number of logically defined partitions the system 35 + * can support. It is constrained by the maximum number of hardware 36 + * partitionable regions. The term 'region' in this context refers to the 37 + * minimum number of nodes that can comprise an access protection grouping. 38 + * The access protection is in regards to memory, IPI and IOI. 39 + * 40 + * The maximum number of hardware partitionable regions is equal to the 41 + * maximum number of nodes in the entire system divided by the minimum number 42 + * of nodes that comprise an access protection grouping. 43 + */ 44 + #define XP_MAX_PARTITIONS 64 45 + 46 + 47 + /* 48 + * Define the number of u64s required to represent all the C-brick nasids 49 + * as a bitmap. The cross-partition kernel modules deal only with 50 + * C-brick nasids, thus the need for bitmaps which don't account for 51 + * odd-numbered (non C-brick) nasids. 
52 + */ 53 + #define XP_MAX_PHYSNODE_ID (MAX_PHYSNODE_ID / 2) 54 + #define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) 55 + #define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) 56 + 57 + 58 + /* 59 + * Wrapper for bte_copy() that should it return a failure status will retry 60 + * the bte_copy() once in the hope that the failure was due to a temporary 61 + * aberration (i.e., the link going down temporarily). 62 + * 63 + * See bte_copy for definition of the input parameters. 64 + * 65 + * Note: xp_bte_copy() should never be called while holding a spinlock. 66 + */ 67 + static inline bte_result_t 68 + xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) 69 + { 70 + bte_result_t ret; 71 + 72 + 73 + ret = bte_copy(src, dest, len, mode, notification); 74 + 75 + if (ret != BTE_SUCCESS) { 76 + if (!in_interrupt()) { 77 + cond_resched(); 78 + } 79 + ret = bte_copy(src, dest, len, mode, notification); 80 + } 81 + 82 + return ret; 83 + } 84 + 85 + 86 + /* 87 + * XPC establishes channel connections between the local partition and any 88 + * other partition that is currently up. Over these channels, kernel-level 89 + * `users' can communicate with their counterparts on the other partitions. 90 + * 91 + * The maxinum number of channels is limited to eight. For performance reasons, 92 + * the internal cross partition structures require sixteen bytes per channel, 93 + * and eight allows all of this interface-shared info to fit in one cache line. 94 + * 95 + * XPC_NCHANNELS reflects the total number of channels currently defined. 96 + * If the need for additional channels arises, one can simply increase 97 + * XPC_NCHANNELS accordingly. If the day should come where that number 98 + * exceeds the MAXIMUM number of channels allowed (eight), then one will need 99 + * to make changes to the XPC code to allow for this. 
100 + */ 101 + #define XPC_MEM_CHANNEL 0 /* memory channel number */ 102 + #define XPC_NET_CHANNEL 1 /* network channel number */ 103 + 104 + #define XPC_NCHANNELS 2 /* #of defined channels */ 105 + #define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */ 106 + 107 + #if XPC_NCHANNELS > XPC_MAX_NCHANNELS 108 + #error XPC_NCHANNELS exceeds MAXIMUM allowed. 109 + #endif 110 + 111 + 112 + /* 113 + * The format of an XPC message is as follows: 114 + * 115 + * +-------+--------------------------------+ 116 + * | flags |////////////////////////////////| 117 + * +-------+--------------------------------+ 118 + * | message # | 119 + * +----------------------------------------+ 120 + * | payload (user-defined message) | 121 + * | | 122 + * : 123 + * | | 124 + * +----------------------------------------+ 125 + * 126 + * The size of the payload is defined by the user via xpc_connect(). A user- 127 + * defined message resides in the payload area. 128 + * 129 + * The user should have no dealings with the message header, but only the 130 + * message's payload. When a message entry is allocated (via xpc_allocate()) 131 + * a pointer to the payload area is returned and not the actual beginning of 132 + * the XPC message. The user then constructs a message in the payload area 133 + * and passes that pointer as an argument on xpc_send() or xpc_send_notify(). 134 + * 135 + * The size of a message entry (within a message queue) must be a cacheline 136 + * sized multiple in order to facilitate the BTE transfer of messages from one 137 + * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user 138 + * that wants to fit as many msg entries as possible in a given memory size 139 + * (e.g. a memory page). 
140 + */ 141 + struct xpc_msg { 142 + u8 flags; /* FOR XPC INTERNAL USE ONLY */ 143 + u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ 144 + s64 number; /* FOR XPC INTERNAL USE ONLY */ 145 + 146 + u64 payload; /* user defined portion of message */ 147 + }; 148 + 149 + 150 + #define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) 151 + #define XPC_MSG_SIZE(_payload_size) \ 152 + L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) 153 + 154 + 155 + /* 156 + * Define the return values and values passed to user's callout functions. 157 + * (It is important to add new value codes at the end just preceding 158 + * xpcUnknownReason, which must have the highest numerical value.) 159 + */ 160 + enum xpc_retval { 161 + xpcSuccess = 0, 162 + 163 + xpcNotConnected, /* 1: channel is not connected */ 164 + xpcConnected, /* 2: channel connected (opened) */ 165 + xpcRETIRED1, /* 3: (formerly xpcDisconnected) */ 166 + 167 + xpcMsgReceived, /* 4: message received */ 168 + xpcMsgDelivered, /* 5: message delivered and acknowledged */ 169 + 170 + xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */ 171 + 172 + xpcNoWait, /* 7: operation would require wait */ 173 + xpcRetry, /* 8: retry operation */ 174 + xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ 175 + xpcInterrupted, /* 10: interrupted wait */ 176 + 177 + xpcUnequalMsgSizes, /* 11: message size disparity between sides */ 178 + xpcInvalidAddress, /* 12: invalid address */ 179 + 180 + xpcNoMemory, /* 13: no memory available for XPC structures */ 181 + xpcLackOfResources, /* 14: insufficient resources for operation */ 182 + xpcUnregistered, /* 15: channel is not registered */ 183 + xpcAlreadyRegistered, /* 16: channel is already registered */ 184 + 185 + xpcPartitionDown, /* 17: remote partition is down */ 186 + xpcNotLoaded, /* 18: XPC module is not loaded */ 187 + xpcUnloading, /* 19: this side is unloading XPC module */ 188 + 189 + xpcBadMagic, /* 20: XPC MAGIC string not found */ 190 + 191 + 
xpcReactivating, /* 21: remote partition was reactivated */ 192 + 193 + xpcUnregistering, /* 22: this side is unregistering channel */ 194 + xpcOtherUnregistering, /* 23: other side is unregistering channel */ 195 + 196 + xpcCloneKThread, /* 24: cloning kernel thread */ 197 + xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */ 198 + 199 + xpcNoHeartbeat, /* 26: remote partition has no heartbeat */ 200 + 201 + xpcPioReadError, /* 27: PIO read error */ 202 + xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */ 203 + 204 + xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */ 205 + xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */ 206 + xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */ 207 + xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */ 208 + xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */ 209 + xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */ 210 + xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */ 211 + xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */ 212 + xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */ 213 + xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */ 214 + 215 + xpcBadVersion, /* 39: bad version number */ 216 + xpcVarsNotSet, /* 40: the XPC variables are not set up */ 217 + xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ 218 + xpcInvalidPartid, /* 42: invalid partition ID */ 219 + xpcLocalPartid, /* 43: local partition ID */ 220 + 221 + xpcUnknownReason /* 44: unknown reason -- must be last in list */ 222 + }; 223 + 224 + 225 + /* 226 + * Define the callout function types used by XPC to update the user on 227 + * connection activity and state changes (via the user function registered by 228 + * xpc_connect()) and to notify them of messages received and delivered (via 229 + * the user function registered by xpc_send_notify()). 
230 + * 231 + * The two function types are xpc_channel_func and xpc_notify_func and 232 + * both share the following arguments, with the exception of "data", which 233 + * only xpc_channel_func has. 234 + * 235 + * Arguments: 236 + * 237 + * reason - reason code. (See following table.) 238 + * partid - partition ID associated with condition. 239 + * ch_number - channel # associated with condition. 240 + * data - pointer to optional data. (See following table.) 241 + * key - pointer to optional user-defined value provided as the "key" 242 + * argument to xpc_connect() or xpc_send_notify(). 243 + * 244 + * In the following table the "Optional Data" column applies to callouts made 245 + * to functions registered by xpc_connect(). A "NA" in that column indicates 246 + * that this reason code can be passed to functions registered by 247 + * xpc_send_notify() (i.e. they don't have data arguments). 248 + * 249 + * Also, the first three reason codes in the following table indicate 250 + * success, whereas the others indicate failure. When a failure reason code 251 + * is received, one can assume that the channel is not connected. 
252 + * 253 + * 254 + * Reason Code | Cause | Optional Data 255 + * =====================+================================+===================== 256 + * xpcConnected | connection has been established| max #of entries 257 + * | to the specified partition on | allowed in message 258 + * | the specified channel | queue 259 + * ---------------------+--------------------------------+--------------------- 260 + * xpcMsgReceived | an XPC message arrived from | address of payload 261 + * | the specified partition on the | 262 + * | specified channel | [the user must call 263 + * | | xpc_received() when 264 + * | | finished with the 265 + * | | payload] 266 + * ---------------------+--------------------------------+--------------------- 267 + * xpcMsgDelivered | notification that the message | NA 268 + * | was delivered to the intended | 269 + * | recipient and that they have | 270 + * | acknowledged its receipt by | 271 + * | calling xpc_received() | 272 + * =====================+================================+===================== 273 + * xpcUnequalMsgSizes | can't connect to the specified | NULL 274 + * | partition on the specified | 275 + * | channel because of mismatched | 276 + * | message sizes | 277 + * ---------------------+--------------------------------+--------------------- 278 + * xpcNoMemory | insufficient memory available | NULL 279 + * | to allocate message queue | 280 + * ---------------------+--------------------------------+--------------------- 281 + * xpcLackOfResources | lack of resources to create | NULL 282 + * | the necessary kthreads to | 283 + * | support the channel | 284 + * ---------------------+--------------------------------+--------------------- 285 + * xpcUnregistering | this side's user has | NULL or NA 286 + * | unregistered by calling | 287 + * | xpc_disconnect() | 288 + * ---------------------+--------------------------------+--------------------- 289 + * xpcOtherUnregistering| the other side's user has | NULL or NA 290 + * | 
unregistered by calling | 291 + * | xpc_disconnect() | 292 + * ---------------------+--------------------------------+--------------------- 293 + * xpcNoHeartbeat | the other side's XPC is no | NULL or NA 294 + * | longer heartbeating | 295 + * | | 296 + * ---------------------+--------------------------------+--------------------- 297 + * xpcUnloading | this side's XPC module is | NULL or NA 298 + * | being unloaded | 299 + * | | 300 + * ---------------------+--------------------------------+--------------------- 301 + * xpcOtherUnloading | the other side's XPC module is | NULL or NA 302 + * | is being unloaded | 303 + * | | 304 + * ---------------------+--------------------------------+--------------------- 305 + * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA 306 + * | error while sending an IPI | 307 + * | | 308 + * ---------------------+--------------------------------+--------------------- 309 + * xpcInvalidAddress | the address either received or | NULL or NA 310 + * | sent by the specified partition| 311 + * | is invalid | 312 + * ---------------------+--------------------------------+--------------------- 313 + * xpcBteNotAvailable | attempt to pull data from the | NULL or NA 314 + * xpcBtePoisonError | specified partition over the | 315 + * xpcBteWriteError | specified channel via a | 316 + * xpcBteAccessError | bte_copy() failed | 317 + * xpcBteTimeOutError | | 318 + * xpcBteXtalkError | | 319 + * xpcBteDirectoryError | | 320 + * xpcBteGenericError | | 321 + * xpcBteUnmappedError | | 322 + * ---------------------+--------------------------------+--------------------- 323 + * xpcUnknownReason | the specified channel to the | NULL or NA 324 + * | specified partition was | 325 + * | unavailable for unknown reasons| 326 + * =====================+================================+===================== 327 + */ 328 + 329 + typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid, 330 + int ch_number, void *data, void *key); 331 + 
332 + typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid, 333 + int ch_number, void *key); 334 + 335 + 336 + /* 337 + * The following is a registration entry. There is a global array of these, 338 + * one per channel. It is used to record the connection registration made 339 + * by the users of XPC. As long as a registration entry exists, for any 340 + * partition that comes up, XPC will attempt to establish a connection on 341 + * that channel. Notification that a connection has been made will occur via 342 + * the xpc_channel_func function. 343 + * 344 + * The 'func' field points to the function to call when asynchronous 345 + * notification is required for such events as: a connection established/lost, 346 + * or an incoming message received, or an error condition encountered. A 347 + * non-NULL 'func' field indicates that there is an active registration for 348 + * the channel. 349 + */ 350 + struct xpc_registration { 351 + struct semaphore sema; 352 + xpc_channel_func func; /* function to call */ 353 + void *key; /* pointer to user's key */ 354 + u16 nentries; /* #of msg entries in local msg queue */ 355 + u16 msg_size; /* message queue's message size */ 356 + u32 assigned_limit; /* limit on #of assigned kthreads */ 357 + u32 idle_limit; /* limit on #of idle kthreads */ 358 + } ____cacheline_aligned; 359 + 360 + 361 + #define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) 362 + 363 + 364 + /* the following are valid xpc_allocate() flags */ 365 + #define XPC_WAIT 0 /* wait flag */ 366 + #define XPC_NOWAIT 1 /* no wait flag */ 367 + 368 + 369 + struct xpc_interface { 370 + void (*connect)(int); 371 + void (*disconnect)(int); 372 + enum xpc_retval (*allocate)(partid_t, int, u32, void **); 373 + enum xpc_retval (*send)(partid_t, int, void *); 374 + enum xpc_retval (*send_notify)(partid_t, int, void *, 375 + xpc_notify_func, void *); 376 + void (*received)(partid_t, int, void *); 377 + enum xpc_retval 
(*partid_to_nasids)(partid_t, void *); 378 + }; 379 + 380 + 381 + extern struct xpc_interface xpc_interface; 382 + 383 + extern void xpc_set_interface(void (*)(int), 384 + void (*)(int), 385 + enum xpc_retval (*)(partid_t, int, u32, void **), 386 + enum xpc_retval (*)(partid_t, int, void *), 387 + enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, 388 + void *), 389 + void (*)(partid_t, int, void *), 390 + enum xpc_retval (*)(partid_t, void *)); 391 + extern void xpc_clear_interface(void); 392 + 393 + 394 + extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, 395 + u16, u32, u32); 396 + extern void xpc_disconnect(int); 397 + 398 + static inline enum xpc_retval 399 + xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) 400 + { 401 + return xpc_interface.allocate(partid, ch_number, flags, payload); 402 + } 403 + 404 + static inline enum xpc_retval 405 + xpc_send(partid_t partid, int ch_number, void *payload) 406 + { 407 + return xpc_interface.send(partid, ch_number, payload); 408 + } 409 + 410 + static inline enum xpc_retval 411 + xpc_send_notify(partid_t partid, int ch_number, void *payload, 412 + xpc_notify_func func, void *key) 413 + { 414 + return xpc_interface.send_notify(partid, ch_number, payload, func, key); 415 + } 416 + 417 + static inline void 418 + xpc_received(partid_t partid, int ch_number, void *payload) 419 + { 420 + return xpc_interface.received(partid, ch_number, payload); 421 + } 422 + 423 + static inline enum xpc_retval 424 + xpc_partid_to_nasids(partid_t partid, void *nasids) 425 + { 426 + return xpc_interface.partid_to_nasids(partid, nasids); 427 + } 428 + 429 + 430 + extern u64 xp_nofault_PIOR_target; 431 + extern int xp_nofault_PIOR(void *); 432 + extern int xp_error_PIOR(void); 433 + 434 + 435 + #endif /* _ASM_IA64_SN_XP_H */ 436 +
+2
kernel/exit.c
··· 846 846 for (;;) ; 847 847 } 848 848 849 + EXPORT_SYMBOL_GPL(do_exit); 850 + 849 851 NORET_TYPE void complete_and_exit(struct completion *comp, long code) 850 852 { 851 853 if (comp)
+2
mm/page_alloc.c
··· 43 43 * initializer cleaner 44 44 */ 45 45 nodemask_t node_online_map = { { [0] = 1UL } }; 46 + EXPORT_SYMBOL(node_online_map); 46 47 nodemask_t node_possible_map = NODE_MASK_ALL; 48 + EXPORT_SYMBOL(node_possible_map); 47 49 struct pglist_data *pgdat_list; 48 50 unsigned long totalram_pages; 49 51 unsigned long totalhigh_pages;