Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sgi-xp: create activate and notify gru message queues

For UV add the code to create the activate and notify gru message queues.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

authored by

Dean Nelson and committed by
H. Peter Anvin
2525789b 6c1c325d

+235 -70
+12
drivers/misc/sgi-xp/xpc.h
··· 181 181 xpc_nasid_mask_nlongs)) 182 182 183 183 /* 184 + * Info pertinent to a GRU message queue using a watch list for irq generation. 185 + */ 186 + struct xpc_gru_mq_uv { 187 + void *address; /* address of GRU message queue */ 188 + unsigned int order; /* size of GRU message queue as a power of 2 */ 189 + int irq; /* irq raised when message is received in mq */ 190 + int mmr_blade; /* blade where watchlist was allocated from */ 191 + unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */ 192 + int watchlist_num; /* number of watchlist allocated by BIOS */ 193 + }; 194 + 195 + /* 184 196 * The activate_mq is used to send/receive GRU messages that affect XPC's 185 197 * heartbeat, partition active state, and channel state. This is UV only. 186 198 */
+223 -70
drivers/misc/sgi-xp/xpc_uv.c
··· 18 18 #include <linux/interrupt.h> 19 19 #include <linux/delay.h> 20 20 #include <linux/device.h> 21 + #include <linux/err.h> 21 22 #include <asm/uv/uv_hub.h> 23 + #if defined CONFIG_X86_64 24 + #include <asm/uv/bios.h> 25 + #include <asm/uv/uv_irq.h> 26 + #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 27 + #include <asm/sn/intr.h> 28 + #include <asm/sn/sn_sal.h> 29 + #endif 22 30 #include "../sgi-gru/gru.h" 23 31 #include "../sgi-gru/grukservices.h" 24 32 #include "xpc.h" ··· 35 27 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); 36 28 37 29 #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) 30 + #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 31 + XPC_ACTIVATE_MSG_SIZE_UV) 32 + #define XPC_ACTIVATE_IRQ_NAME "xpc_activate" 33 + 38 34 #define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) 35 + #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 36 + XPC_NOTIFY_MSG_SIZE_UV) 37 + #define XPC_NOTIFY_IRQ_NAME "xpc_notify" 39 38 40 - #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 41 - XPC_ACTIVATE_MSG_SIZE_UV) 42 - #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 43 - XPC_NOTIFY_MSG_SIZE_UV) 44 - 45 - static void *xpc_activate_mq_uv; 46 - static void *xpc_notify_mq_uv; 39 + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; 40 + static struct xpc_gru_mq_uv *xpc_notify_mq_uv; 47 41 48 42 static int 49 43 xpc_setup_partitions_sn_uv(void) ··· 62 52 return 0; 63 53 } 64 54 65 - static void * 66 - xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, 67 - irq_handler_t irq_handler) 55 + static int 56 + xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) 68 57 { 69 - int ret; 70 - int nid; 71 - int mq_order; 72 - struct page *page; 73 - void *mq; 74 - 75 - nid = cpu_to_node(cpuid); 76 - mq_order = get_order(mq_size); 77 - page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 78 - mq_order); 79 - if (page == NULL) { 
80 - dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 81 - "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); 82 - return NULL; 58 + #if defined CONFIG_X86_64 59 + mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); 60 + if (mq->irq < 0) { 61 + dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", 62 + mq->irq); 83 63 } 84 64 85 - mq = page_address(page); 86 - ret = gru_create_message_queue(mq, mq_size); 87 - if (ret != 0) { 88 - dev_err(xpc_part, "gru_create_message_queue() returned " 89 - "error=%d\n", ret); 90 - free_pages((unsigned long)mq, mq_order); 91 - return NULL; 92 - } 65 + #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 66 + int mmr_pnode; 67 + unsigned long mmr_value; 93 68 94 - /* !!! Need to do some other things to set up IRQ */ 69 + if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) 70 + mq->irq = SGI_XPC_ACTIVATE; 71 + else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) 72 + mq->irq = SGI_XPC_NOTIFY; 73 + else 74 + return -EINVAL; 95 75 96 - ret = request_irq(irq, irq_handler, 0, "xpc", NULL); 97 - if (ret != 0) { 98 - dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", 99 - irq, ret); 100 - free_pages((unsigned long)mq, mq_order); 101 - return NULL; 102 - } 76 + mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 77 + mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; 103 78 104 - /* !!! enable generation of irq when GRU mq op occurs to this mq */ 79 + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); 80 + #else 81 + #error not a supported configuration 82 + #endif 105 83 106 - /* ??? allow other partitions to access GRU mq? */ 107 - 108 - return mq; 84 + return 0; 109 85 } 110 86 111 87 static void 112 - xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) 88 + xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) 113 89 { 114 - /* ??? disallow other partitions to access GRU mq? 
*/ 90 + #if defined CONFIG_X86_64 91 + uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); 115 92 116 - /* !!! disable generation of irq when GRU mq op occurs to this mq */ 93 + #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 94 + int mmr_pnode; 95 + unsigned long mmr_value; 117 96 118 - free_irq(irq, NULL); 97 + mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 98 + mmr_value = 1UL << 16; 119 99 120 - free_pages((unsigned long)mq, get_order(mq_size)); 100 + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); 101 + #else 102 + #error not a supported configuration 103 + #endif 104 + } 105 + 106 + static int 107 + xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) 108 + { 109 + int ret; 110 + 111 + #if defined CONFIG_X86_64 112 + ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order, 113 + &mq->mmr_offset); 114 + if (ret < 0) { 115 + dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " 116 + "ret=%d\n", ret); 117 + return ret; 118 + } 119 + #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 120 + ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order, 121 + &mq->mmr_offset); 122 + if (ret < 0) { 123 + dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", 124 + ret); 125 + return -EBUSY; 126 + } 127 + #else 128 + #error not a supported configuration 129 + #endif 130 + 131 + mq->watchlist_num = ret; 132 + return 0; 133 + } 134 + 135 + static void 136 + xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) 137 + { 138 + int ret; 139 + 140 + #if defined CONFIG_X86_64 141 + ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 142 + BUG_ON(ret != BIOS_STATUS_SUCCESS); 143 + #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 144 + ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 145 + BUG_ON(ret != SALRET_OK); 146 + #else 147 + #error not a supported configuration 148 + #endif 149 + } 150 + 151 + static struct xpc_gru_mq_uv * 152 + 
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, 153 + irq_handler_t irq_handler) 154 + { 155 + enum xp_retval xp_ret; 156 + int ret; 157 + int nid; 158 + int pg_order; 159 + struct page *page; 160 + struct xpc_gru_mq_uv *mq; 161 + 162 + mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); 163 + if (mq == NULL) { 164 + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " 165 + "a xpc_gru_mq_uv structure\n"); 166 + ret = -ENOMEM; 167 + goto out_1; 168 + } 169 + 170 + pg_order = get_order(mq_size); 171 + mq->order = pg_order + PAGE_SHIFT; 172 + mq_size = 1UL << mq->order; 173 + 174 + mq->mmr_blade = uv_cpu_to_blade_id(cpu); 175 + 176 + nid = cpu_to_node(cpu); 177 + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 178 + pg_order); 179 + if (page == NULL) { 180 + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 181 + "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); 182 + ret = -ENOMEM; 183 + goto out_2; 184 + } 185 + mq->address = page_address(page); 186 + 187 + ret = gru_create_message_queue(mq->address, mq_size); 188 + if (ret != 0) { 189 + dev_err(xpc_part, "gru_create_message_queue() returned " 190 + "error=%d\n", ret); 191 + ret = -EINVAL; 192 + goto out_3; 193 + } 194 + 195 + /* enable generation of irq when GRU mq operation occurs to this mq */ 196 + ret = xpc_gru_mq_watchlist_alloc_uv(mq); 197 + if (ret != 0) 198 + goto out_3; 199 + 200 + ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name); 201 + if (ret != 0) 202 + goto out_4; 203 + 204 + ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); 205 + if (ret != 0) { 206 + dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", 207 + mq->irq, ret); 208 + goto out_5; 209 + } 210 + 211 + /* allow other partitions to access this GRU mq */ 212 + xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); 213 + if (xp_ret != xpSuccess) { 214 + ret = -EACCES; 215 + goto out_6; 216 + } 217 + 218 + return mq; 219 + 220 + /* something went 
wrong */ 221 + out_6: 222 + free_irq(mq->irq, NULL); 223 + out_5: 224 + xpc_release_gru_mq_irq_uv(mq); 225 + out_4: 226 + xpc_gru_mq_watchlist_free_uv(mq); 227 + out_3: 228 + free_pages((unsigned long)mq->address, pg_order); 229 + out_2: 230 + kfree(mq); 231 + out_1: 232 + return ERR_PTR(ret); 233 + } 234 + 235 + static void 236 + xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) 237 + { 238 + unsigned int mq_size; 239 + int pg_order; 240 + int ret; 241 + 242 + /* disallow other partitions to access GRU mq */ 243 + mq_size = 1UL << mq->order; 244 + ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); 245 + BUG_ON(ret != xpSuccess); 246 + 247 + /* unregister irq handler and release mq irq/vector mapping */ 248 + free_irq(mq->irq, NULL); 249 + xpc_release_gru_mq_irq_uv(mq); 250 + 251 + /* disable generation of irq when GRU mq op occurs to this mq */ 252 + xpc_gru_mq_watchlist_free_uv(mq); 253 + 254 + pg_order = mq->order - PAGE_SHIFT; 255 + free_pages((unsigned long)mq->address, pg_order); 256 + 257 + kfree(mq); 121 258 } 122 259 123 260 static enum xp_retval ··· 559 402 struct xpc_partition *part; 560 403 int wakeup_hb_checker = 0; 561 404 562 - while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { 405 + while (1) { 406 + msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address); 407 + if (msg_hdr == NULL) 408 + break; 563 409 564 410 partid = msg_hdr->partid; 565 411 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { ··· 578 418 } 579 419 } 580 420 581 - gru_free_message(xpc_activate_mq_uv, msg_hdr); 421 + gru_free_message(xpc_activate_mq_uv->address, msg_hdr); 582 422 } 583 423 584 424 if (wakeup_hb_checker) ··· 667 507 static int 668 508 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) 669 509 { 670 - rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv); 510 + rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address); 671 511 return 0; 672 512 } 673 513 ··· 1571 1411 return -E2BIG; 1572 1412 } 1573 1413 1574 - /* ??? 
The cpuid argument's value is 0, is that what we want? */ 1575 - /* !!! The irq argument's value isn't correct. */ 1576 - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0, 1414 + xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 1415 + XPC_ACTIVATE_IRQ_NAME, 1577 1416 xpc_handle_activate_IRQ_uv); 1578 - if (xpc_activate_mq_uv == NULL) 1579 - return -ENOMEM; 1417 + if (IS_ERR(xpc_activate_mq_uv)) 1418 + return PTR_ERR(xpc_activate_mq_uv); 1580 1419 1581 - /* ??? The cpuid argument's value is 0, is that what we want? */ 1582 - /* !!! The irq argument's value isn't correct. */ 1583 - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0, 1420 + xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 1421 + XPC_NOTIFY_IRQ_NAME, 1584 1422 xpc_handle_notify_IRQ_uv); 1585 - if (xpc_notify_mq_uv == NULL) { 1586 - /* !!! The irq argument's value isn't correct. */ 1587 - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, 1588 - XPC_ACTIVATE_MQ_SIZE_UV, 0); 1589 - return -ENOMEM; 1423 + if (IS_ERR(xpc_notify_mq_uv)) { 1424 + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1425 + return PTR_ERR(xpc_notify_mq_uv); 1590 1426 } 1591 1427 1592 1428 return 0; ··· 1591 1435 void 1592 1436 xpc_exit_uv(void) 1593 1437 { 1594 - /* !!! The irq argument's value isn't correct. */ 1595 - xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); 1596 - 1597 - /* !!! The irq argument's value isn't correct. */ 1598 - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); 1438 + xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); 1439 + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1599 1440 }