Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IA64] run some drivers/misc/sgi-xp through scripts/Lindent

Ran patches through scripts/Lindent (part 1).

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Authored by Dean Nelson and committed by Tony Luck.
4a3ad2dd 45d9ca49

+394 -551
+29 -51
drivers/misc/sgi-xp/xp.h
··· 6 6 * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved. 7 7 */ 8 8 9 - 10 9 /* 11 10 * External Cross Partition (XP) structures and defines. 12 11 */ 13 12 14 - 15 13 #ifndef _DRIVERS_MISC_SGIXP_XP_H 16 14 #define _DRIVERS_MISC_SGIXP_XP_H 17 - 18 15 19 16 #include <linux/cache.h> 20 17 #include <linux/hardirq.h> ··· 19 22 #include <asm/sn/types.h> 20 23 #include <asm/sn/bte.h> 21 24 22 - 23 25 #ifdef USE_DBUG_ON 24 26 #define DBUG_ON(condition) BUG_ON(condition) 25 27 #else 26 28 #define DBUG_ON(condition) 27 29 #endif 28 - 29 30 30 31 /* 31 32 * Define the maximum number of logically defined partitions the system ··· 38 43 */ 39 44 #define XP_MAX_PARTITIONS 64 40 45 41 - 42 46 /* 43 47 * Define the number of u64s required to represent all the C-brick nasids 44 48 * as a bitmap. The cross-partition kernel modules deal only with ··· 47 53 #define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2) 48 54 #define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) 49 55 #define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) 50 - 51 56 52 57 /* 53 58 * Wrapper for bte_copy() that should it return a failure status will retry ··· 66 73 { 67 74 bte_result_t ret; 68 75 u64 pdst = ia64_tpa(vdst); 69 - 70 76 71 77 /* 72 78 * Ensure that the physically mapped memory is contiguous. ··· 87 95 88 96 return ret; 89 97 } 90 - 91 98 92 99 /* 93 100 * XPC establishes channel connections between the local partition and any ··· 112 121 #if XPC_NCHANNELS > XPC_MAX_NCHANNELS 113 122 #error XPC_NCHANNELS exceeds MAXIMUM allowed. 
114 123 #endif 115 - 116 124 117 125 /* 118 126 * The format of an XPC message is as follows: ··· 150 160 u64 payload; /* user defined portion of message */ 151 161 }; 152 162 153 - 154 163 #define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) 155 164 #define XPC_MSG_SIZE(_payload_size) \ 156 165 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) 157 - 158 166 159 167 /* 160 168 * Define the return values and values passed to user's callout functions. ··· 258 270 xpcUnknownReason /* 116: unknown reason -- must be last in list */ 259 271 }; 260 272 261 - 262 273 /* 263 274 * Define the callout function types used by XPC to update the user on 264 275 * connection activity and state changes (via the user function registered by ··· 362 375 * =====================+================================+===================== 363 376 */ 364 377 365 - typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid, 366 - int ch_number, void *data, void *key); 378 + typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid, 379 + int ch_number, void *data, void *key); 367 380 368 - typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid, 369 - int ch_number, void *key); 370 - 381 + typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid, 382 + int ch_number, void *key); 371 383 372 384 /* 373 385 * The following is a registration entry. 
There is a global array of these, ··· 384 398 */ 385 399 struct xpc_registration { 386 400 struct mutex mutex; 387 - xpc_channel_func func; /* function to call */ 388 - void *key; /* pointer to user's key */ 389 - u16 nentries; /* #of msg entries in local msg queue */ 390 - u16 msg_size; /* message queue's message size */ 391 - u32 assigned_limit; /* limit on #of assigned kthreads */ 392 - u32 idle_limit; /* limit on #of idle kthreads */ 401 + xpc_channel_func func; /* function to call */ 402 + void *key; /* pointer to user's key */ 403 + u16 nentries; /* #of msg entries in local msg queue */ 404 + u16 msg_size; /* message queue's message size */ 405 + u32 assigned_limit; /* limit on #of assigned kthreads */ 406 + u32 idle_limit; /* limit on #of idle kthreads */ 393 407 } ____cacheline_aligned; 394 - 395 408 396 409 #define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) 397 410 398 - 399 411 /* the following are valid xpc_allocate() flags */ 400 - #define XPC_WAIT 0 /* wait flag */ 401 - #define XPC_NOWAIT 1 /* no wait flag */ 402 - 412 + #define XPC_WAIT 0 /* wait flag */ 413 + #define XPC_NOWAIT 1 /* no wait flag */ 403 414 404 415 struct xpc_interface { 405 - void (*connect)(int); 406 - void (*disconnect)(int); 407 - enum xpc_retval (*allocate)(partid_t, int, u32, void **); 408 - enum xpc_retval (*send)(partid_t, int, void *); 409 - enum xpc_retval (*send_notify)(partid_t, int, void *, 410 - xpc_notify_func, void *); 411 - void (*received)(partid_t, int, void *); 412 - enum xpc_retval (*partid_to_nasids)(partid_t, void *); 416 + void (*connect) (int); 417 + void (*disconnect) (int); 418 + enum xpc_retval (*allocate) (partid_t, int, u32, void **); 419 + enum xpc_retval (*send) (partid_t, int, void *); 420 + enum xpc_retval (*send_notify) (partid_t, int, void *, 421 + xpc_notify_func, void *); 422 + void (*received) (partid_t, int, void *); 423 + enum xpc_retval (*partid_to_nasids) (partid_t, void *); 413 424 }; 414 - 415 425 416 426 extern struct 
xpc_interface xpc_interface; 417 427 418 428 extern void xpc_set_interface(void (*)(int), 419 - void (*)(int), 420 - enum xpc_retval (*)(partid_t, int, u32, void **), 421 - enum xpc_retval (*)(partid_t, int, void *), 422 - enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, 423 - void *), 424 - void (*)(partid_t, int, void *), 425 - enum xpc_retval (*)(partid_t, void *)); 429 + void (*)(int), 430 + enum xpc_retval (*)(partid_t, int, u32, void **), 431 + enum xpc_retval (*)(partid_t, int, void *), 432 + enum xpc_retval (*)(partid_t, int, void *, 433 + xpc_notify_func, void *), 434 + void (*)(partid_t, int, void *), 435 + enum xpc_retval (*)(partid_t, void *)); 426 436 extern void xpc_clear_interface(void); 427 437 428 - 429 438 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, 430 - u16, u32, u32); 439 + u16, u32, u32); 431 440 extern void xpc_disconnect(int); 432 441 433 442 static inline enum xpc_retval ··· 439 458 440 459 static inline enum xpc_retval 441 460 xpc_send_notify(partid_t partid, int ch_number, void *payload, 442 - xpc_notify_func func, void *key) 461 + xpc_notify_func func, void *key) 443 462 { 444 463 return xpc_interface.send_notify(partid, ch_number, payload, func, key); 445 464 } ··· 456 475 return xpc_interface.partid_to_nasids(partid, nasids); 457 476 } 458 477 459 - 460 478 extern u64 xp_nofault_PIOR_target; 461 479 extern int xp_nofault_PIOR(void *); 462 480 extern int xp_error_PIOR(void); 463 481 464 - 465 482 #endif /* _DRIVERS_MISC_SGIXP_XP_H */ 466 -
+44 -54
drivers/misc/sgi-xp/xp_main.c
··· 6 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 7 7 */ 8 8 9 - 10 9 /* 11 10 * Cross Partition (XP) base. 12 11 * ··· 13 14 * with XPC, yet not be dependent on XPC. 14 15 * 15 16 */ 16 - 17 17 18 18 #include <linux/kernel.h> 19 19 #include <linux/interrupt.h> ··· 22 24 #include <asm/sn/sn_sal.h> 23 25 #include "xp.h" 24 26 25 - 26 27 /* 27 28 * Target of nofault PIO read. 28 29 */ 29 30 u64 xp_nofault_PIOR_target; 30 - 31 31 32 32 /* 33 33 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level ··· 33 37 */ 34 38 struct xpc_registration xpc_registrations[XPC_NCHANNELS]; 35 39 36 - 37 40 /* 38 41 * Initialize the XPC interface to indicate that XPC isn't loaded. 39 42 */ 40 - static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } 43 + static enum xpc_retval 44 + xpc_notloaded(void) 45 + { 46 + return xpcNotLoaded; 47 + } 41 48 42 49 struct xpc_interface xpc_interface = { 43 - (void (*)(int)) xpc_notloaded, 44 - (void (*)(int)) xpc_notloaded, 45 - (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, 46 - (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, 47 - (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) 48 - xpc_notloaded, 49 - (void (*)(partid_t, int, void *)) xpc_notloaded, 50 - (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded 50 + (void (*)(int))xpc_notloaded, 51 + (void (*)(int))xpc_notloaded, 52 + (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded, 53 + (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded, 54 + (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) 55 + xpc_notloaded, 56 + (void (*)(partid_t, int, void *))xpc_notloaded, 57 + (enum xpc_retval(*)(partid_t, void *))xpc_notloaded 51 58 }; 52 - 53 59 54 60 /* 55 61 * XPC calls this when it (the XPC module) has been loaded. 
56 62 */ 57 63 void 58 - xpc_set_interface(void (*connect)(int), 59 - void (*disconnect)(int), 60 - enum xpc_retval (*allocate)(partid_t, int, u32, void **), 61 - enum xpc_retval (*send)(partid_t, int, void *), 62 - enum xpc_retval (*send_notify)(partid_t, int, void *, 63 - xpc_notify_func, void *), 64 - void (*received)(partid_t, int, void *), 65 - enum xpc_retval (*partid_to_nasids)(partid_t, void *)) 64 + xpc_set_interface(void (*connect) (int), 65 + void (*disconnect) (int), 66 + enum xpc_retval (*allocate) (partid_t, int, u32, void **), 67 + enum xpc_retval (*send) (partid_t, int, void *), 68 + enum xpc_retval (*send_notify) (partid_t, int, void *, 69 + xpc_notify_func, void *), 70 + void (*received) (partid_t, int, void *), 71 + enum xpc_retval (*partid_to_nasids) (partid_t, void *)) 66 72 { 67 73 xpc_interface.connect = connect; 68 74 xpc_interface.disconnect = disconnect; ··· 75 77 xpc_interface.partid_to_nasids = partid_to_nasids; 76 78 } 77 79 78 - 79 80 /* 80 81 * XPC calls this when it (the XPC module) is being unloaded. 
81 82 */ 82 83 void 83 84 xpc_clear_interface(void) 84 85 { 85 - xpc_interface.connect = (void (*)(int)) xpc_notloaded; 86 - xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; 87 - xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, 88 - void **)) xpc_notloaded; 89 - xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) 90 - xpc_notloaded; 91 - xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, 92 - xpc_notify_func, void *)) xpc_notloaded; 86 + xpc_interface.connect = (void (*)(int))xpc_notloaded; 87 + xpc_interface.disconnect = (void (*)(int))xpc_notloaded; 88 + xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32, 89 + void **))xpc_notloaded; 90 + xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *)) 91 + xpc_notloaded; 92 + xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *, 93 + xpc_notify_func, 94 + void *))xpc_notloaded; 93 95 xpc_interface.received = (void (*)(partid_t, int, void *)) 94 - xpc_notloaded; 95 - xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) 96 - xpc_notloaded; 96 + xpc_notloaded; 97 + xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *)) 98 + xpc_notloaded; 97 99 } 98 - 99 100 100 101 /* 101 102 * Register for automatic establishment of a channel connection whenever ··· 122 125 */ 123 126 enum xpc_retval 124 127 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, 125 - u16 nentries, u32 assigned_limit, u32 idle_limit) 128 + u16 nentries, u32 assigned_limit, u32 idle_limit) 126 129 { 127 130 struct xpc_registration *registration; 128 - 129 131 130 132 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 131 133 DBUG_ON(payload_size == 0 || nentries == 0); ··· 158 162 return xpcSuccess; 159 163 } 160 164 161 - 162 165 /* 163 166 * Remove the registration for automatic connection of the specified channel 164 167 * when a partition comes up. 
··· 175 180 xpc_disconnect(int ch_number) 176 181 { 177 182 struct xpc_registration *registration; 178 - 179 183 180 184 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 181 185 ··· 208 214 return; 209 215 } 210 216 211 - 212 217 int __init 213 218 xp_init(void) 214 219 { 215 220 int ret, ch_number; 216 - u64 func_addr = *(u64 *) xp_nofault_PIOR; 217 - u64 err_func_addr = *(u64 *) xp_error_PIOR; 218 - 221 + u64 func_addr = *(u64 *)xp_nofault_PIOR; 222 + u64 err_func_addr = *(u64 *)xp_error_PIOR; 219 223 220 224 if (!ia64_platform_is("sn2")) { 221 225 return -ENODEV; ··· 229 237 * work around). 230 238 */ 231 239 if ((ret = sn_register_nofault_code(func_addr, err_func_addr, 232 - err_func_addr, 1, 1)) != 0) { 240 + err_func_addr, 1, 1)) != 0) { 233 241 printk(KERN_ERR "XP: can't register nofault code, error=%d\n", 234 - ret); 242 + ret); 235 243 } 236 244 /* 237 245 * Setup the nofault PIO read target. (There is no special reason why ··· 250 258 251 259 return 0; 252 260 } 253 - module_init(xp_init); 254 261 262 + module_init(xp_init); 255 263 256 264 void __exit 257 265 xp_exit(void) 258 266 { 259 - u64 func_addr = *(u64 *) xp_nofault_PIOR; 260 - u64 err_func_addr = *(u64 *) xp_error_PIOR; 261 - 267 + u64 func_addr = *(u64 *)xp_nofault_PIOR; 268 + u64 err_func_addr = *(u64 *)xp_error_PIOR; 262 269 263 270 /* unregister the PIO read nofault code region */ 264 - (void) sn_register_nofault_code(func_addr, err_func_addr, 265 - err_func_addr, 1, 0); 271 + (void)sn_register_nofault_code(func_addr, err_func_addr, 272 + err_func_addr, 1, 0); 266 273 } 267 - module_exit(xp_exit); 268 274 275 + module_exit(xp_exit); 269 276 270 277 MODULE_AUTHOR("Silicon Graphics, Inc."); 271 278 MODULE_DESCRIPTION("Cross Partition (XP) base"); ··· 278 287 EXPORT_SYMBOL(xpc_set_interface); 279 288 EXPORT_SYMBOL(xpc_connect); 280 289 EXPORT_SYMBOL(xpc_disconnect); 281 -
-1
drivers/misc/sgi-xp/xp_nofault.S
··· 6 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 7 7 */ 8 8 9 - 10 9 /* 11 10 * The xp_nofault_PIOR function takes a pointer to a remote PIO register 12 11 * and attempts to load and consume a value from it. This function
+183 -255
drivers/misc/sgi-xp/xpc.h
··· 6 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 7 7 */ 8 8 9 - 10 9 /* 11 10 * Cross Partition Communication (XPC) structures and macros. 12 11 */ 13 12 14 13 #ifndef _DRIVERS_MISC_SGIXP_XPC_H 15 14 #define _DRIVERS_MISC_SGIXP_XPC_H 16 - 17 15 18 16 #include <linux/interrupt.h> 19 17 #include <linux/sysctl.h> ··· 27 29 #include <asm/sn/shub_mmr.h> 28 30 #include "xp.h" 29 31 30 - 31 32 /* 32 33 * XPC Version numbers consist of a major and minor number. XPC can always 33 34 * talk to versions with same major #, and never talk to versions with a ··· 35 38 #define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) 36 39 #define XPC_VERSION_MAJOR(_v) ((_v) >> 4) 37 40 #define XPC_VERSION_MINOR(_v) ((_v) & 0xf) 38 - 39 41 40 42 /* 41 43 * The next macros define word or bit representations for given ··· 62 66 63 67 /* define the process name of the discovery thread */ 64 68 #define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" 65 - 66 69 67 70 /* 68 71 * the reserved page ··· 116 121 u64 nasids_size; /* SAL: size of each nasid mask in bytes */ 117 122 }; 118 123 119 - #define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */ 124 + #define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */ 120 125 121 126 #define XPC_SUPPORTS_RP_STAMP(_version) \ 122 127 (_version >= _XPC_VERSION(1,1)) ··· 133 138 { 134 139 int ret; 135 140 136 - 137 141 if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) { 138 142 ret = stamp1->tv_nsec - stamp2->tv_nsec; 139 143 } 140 144 return ret; 141 145 } 142 - 143 146 144 147 /* 145 148 * Define the structures by which XPC variables can be exported to other ··· 165 172 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ 166 173 }; 167 174 168 - #define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */ 175 + #define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */ 169 176 170 177 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) 
\ 171 178 (_version >= _XPC_VERSION(3,1)) 172 - 173 179 174 180 static inline int 175 181 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars) ··· 185 193 old_mask = vars->heartbeating_to_mask; 186 194 new_mask = (old_mask | (1UL << partid)); 187 195 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != 188 - old_mask); 196 + old_mask); 189 197 } 190 198 191 199 static inline void ··· 197 205 old_mask = vars->heartbeating_to_mask; 198 206 new_mask = (old_mask & ~(1UL << partid)); 199 207 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != 200 - old_mask); 208 + old_mask); 201 209 } 202 - 203 210 204 211 /* 205 212 * The AMOs page consists of a number of AMO variables which are divided into ··· 212 221 #define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS) 213 222 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) 214 223 #define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) 215 - 216 224 217 225 /* 218 226 * The following structure describes the per partition specific variables. ··· 247 257 * MAGIC2 indicates that this partition has pulled the remote partititions 248 258 * per partition variables that pertain to this partition. 
249 259 */ 250 - #define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 251 - #define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 252 - 260 + #define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 261 + #define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 253 262 254 263 /* the reserved page sizes and offsets */ 255 264 ··· 259 270 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) 260 271 #define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words) 261 272 #define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE) 262 - 263 273 264 274 /* 265 275 * Functions registered by add_timer() or called by kernel_thread() only ··· 273 285 #define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) 274 286 #define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) 275 287 276 - 277 - 278 288 /* 279 289 * Define a Get/Put value pair (pointers) used with a message queue. 280 290 */ ··· 283 297 284 298 #define XPC_GP_SIZE \ 285 299 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) 286 - 287 - 288 300 289 301 /* 290 302 * Define a structure that contains arguments associated with opening and ··· 299 315 #define XPC_OPENCLOSE_ARGS_SIZE \ 300 316 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) 301 317 302 - 303 - 304 318 /* struct xpc_msg flags */ 305 319 306 320 #define XPC_M_DONE 0x01 /* msg has been received/consumed */ 307 321 #define XPC_M_READY 0x02 /* msg is ready to be sent */ 308 322 #define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ 309 323 310 - 311 324 #define XPC_MSG_ADDRESS(_payload) \ 312 325 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) 313 - 314 - 315 326 316 327 /* 317 328 * Defines notify entry. ··· 315 336 * and consumed by the intended recipient. 
316 337 */ 317 338 struct xpc_notify { 318 - volatile u8 type; /* type of notification */ 339 + volatile u8 type; /* type of notification */ 319 340 320 341 /* the following two fields are only used if type == XPC_N_CALL */ 321 - xpc_notify_func func; /* user's notify function */ 322 - void *key; /* pointer to user's key */ 342 + xpc_notify_func func; /* user's notify function */ 343 + void *key; /* pointer to user's key */ 323 344 }; 324 345 325 346 /* struct xpc_notify type of notification */ 326 347 327 348 #define XPC_N_CALL 0x01 /* notify function provided by user */ 328 - 329 - 330 349 331 350 /* 332 351 * Define the structure that manages all the stuff required by a channel. In ··· 405 428 * messages. 406 429 */ 407 430 struct xpc_channel { 408 - partid_t partid; /* ID of remote partition connected */ 409 - spinlock_t lock; /* lock for updating this structure */ 410 - u32 flags; /* general flags */ 431 + partid_t partid; /* ID of remote partition connected */ 432 + spinlock_t lock; /* lock for updating this structure */ 433 + u32 flags; /* general flags */ 411 434 412 - enum xpc_retval reason; /* reason why channel is disconnect'g */ 413 - int reason_line; /* line# disconnect initiated from */ 435 + enum xpc_retval reason; /* reason why channel is disconnect'g */ 436 + int reason_line; /* line# disconnect initiated from */ 414 437 415 - u16 number; /* channel # */ 438 + u16 number; /* channel # */ 416 439 417 - u16 msg_size; /* sizeof each msg entry */ 418 - u16 local_nentries; /* #of msg entries in local msg queue */ 419 - u16 remote_nentries; /* #of msg entries in remote msg queue*/ 440 + u16 msg_size; /* sizeof each msg entry */ 441 + u16 local_nentries; /* #of msg entries in local msg queue */ 442 + u16 remote_nentries; /* #of msg entries in remote msg queue */ 420 443 421 444 void *local_msgqueue_base; /* base address of kmalloc'd space */ 422 445 struct xpc_msg *local_msgqueue; /* local message queue */ 423 446 void *remote_msgqueue_base; /* base 
address of kmalloc'd space */ 424 - struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ 425 - /* local message queue */ 426 - u64 remote_msgqueue_pa; /* phys addr of remote partition's */ 427 - /* local message queue */ 447 + struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ 448 + /* local message queue */ 449 + u64 remote_msgqueue_pa; /* phys addr of remote partition's */ 450 + /* local message queue */ 428 451 429 - atomic_t references; /* #of external references to queues */ 452 + atomic_t references; /* #of external references to queues */ 430 453 431 - atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 432 - wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 454 + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 455 + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 433 456 434 - u8 delayed_IPI_flags; /* IPI flags received, but delayed */ 435 - /* action until channel disconnected */ 457 + u8 delayed_IPI_flags; /* IPI flags received, but delayed */ 458 + /* action until channel disconnected */ 436 459 437 460 /* queue of msg senders who want to be notified when msg received */ 438 461 439 - atomic_t n_to_notify; /* #of msg senders to notify */ 440 - struct xpc_notify *notify_queue;/* notify queue for messages sent */ 462 + atomic_t n_to_notify; /* #of msg senders to notify */ 463 + struct xpc_notify *notify_queue; /* notify queue for messages sent */ 441 464 442 - xpc_channel_func func; /* user's channel function */ 443 - void *key; /* pointer to user's key */ 465 + xpc_channel_func func; /* user's channel function */ 466 + void *key; /* pointer to user's key */ 444 467 445 468 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ 446 - struct completion wdisconnect_wait; /* wait for channel disconnect */ 469 + struct completion wdisconnect_wait; /* wait for channel disconnect */ 447 470 448 - struct xpc_openclose_args 
*local_openclose_args; /* args passed on */ 449 - /* opening or closing of channel */ 471 + struct xpc_openclose_args *local_openclose_args; /* args passed on */ 472 + /* opening or closing of channel */ 450 473 451 474 /* various flavors of local and remote Get/Put values */ 452 475 ··· 454 477 struct xpc_gp remote_GP; /* remote Get/Put values */ 455 478 struct xpc_gp w_local_GP; /* working local Get/Put values */ 456 479 struct xpc_gp w_remote_GP; /* working remote Get/Put values */ 457 - s64 next_msg_to_pull; /* Put value of next msg to pull */ 480 + s64 next_msg_to_pull; /* Put value of next msg to pull */ 458 481 459 482 /* kthread management related fields */ 460 483 ··· 462 485 // >>> allow the assigned limit be unbounded and let the idle limit be dynamic 463 486 // >>> dependent on activity over the last interval of time 464 487 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 465 - u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ 466 - atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ 488 + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ 489 + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ 467 490 u32 kthreads_idle_limit; /* limit on #of kthreads idle */ 468 491 atomic_t kthreads_active; /* #of kthreads actively working */ 469 492 // >>> following field is temporary 470 - u32 kthreads_created; /* total #of kthreads created */ 493 + u32 kthreads_created; /* total #of kthreads created */ 471 494 472 495 wait_queue_head_t idle_wq; /* idle kthread wait queue */ 473 496 474 497 } ____cacheline_aligned; 475 498 476 - 477 499 /* struct xpc_channel flags */ 478 500 479 - #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ 501 + #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ 480 502 481 - #define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ 482 - #define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ 483 - #define 
XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ 484 - #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 503 + #define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ 504 + #define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ 505 + #define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ 506 + #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 485 507 486 - #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 487 - #define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ 508 + #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 509 + #define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ 488 510 #define XPC_C_CONNECTEDCALLOUT_MADE \ 489 - 0x00000080 /* connected callout completed */ 490 - #define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ 491 - #define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ 511 + 0x00000080 /* connected callout completed */ 512 + #define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ 513 + #define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ 492 514 493 - #define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ 494 - #define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ 495 - #define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ 496 - #define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ 515 + #define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ 516 + #define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ 517 + #define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ 518 + #define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ 497 519 498 - #define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ 499 - #define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ 520 
+ #define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ 521 + #define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ 500 522 #define XPC_C_DISCONNECTINGCALLOUT \ 501 - 0x00010000 /* disconnecting callout initiated */ 523 + 0x00010000 /* disconnecting callout initiated */ 502 524 #define XPC_C_DISCONNECTINGCALLOUT_MADE \ 503 - 0x00020000 /* disconnecting callout completed */ 504 - #define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 505 - 506 - 525 + 0x00020000 /* disconnecting callout completed */ 526 + #define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 507 527 508 528 /* 509 529 * Manages channels on a partition basis. There is one of these structures ··· 511 537 512 538 /* XPC HB infrastructure */ 513 539 514 - u8 remote_rp_version; /* version# of partition's rsvd pg */ 515 - struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */ 516 - u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 517 - u64 remote_vars_pa; /* phys addr of partition's vars */ 540 + u8 remote_rp_version; /* version# of partition's rsvd pg */ 541 + struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */ 542 + u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 543 + u64 remote_vars_pa; /* phys addr of partition's vars */ 518 544 u64 remote_vars_part_pa; /* phys addr of partition's vars part */ 519 - u64 last_heartbeat; /* HB at last read */ 545 + u64 last_heartbeat; /* HB at last read */ 520 546 u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 521 - int remote_act_nasid; /* active part's act/deact nasid */ 547 + int remote_act_nasid; /* active part's act/deact nasid */ 522 548 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ 523 - u32 act_IRQ_rcvd; /* IRQs since activation */ 524 - spinlock_t act_lock; /* protect updating of act_state */ 525 - u8 act_state; /* from XPC HB viewpoint */ 526 - u8 remote_vars_version; /* version# of 
partition's vars */ 527 - enum xpc_retval reason; /* reason partition is deactivating */ 528 - int reason_line; /* line# deactivation initiated from */ 529 - int reactivate_nasid; /* nasid in partition to reactivate */ 549 + u32 act_IRQ_rcvd; /* IRQs since activation */ 550 + spinlock_t act_lock; /* protect updating of act_state */ 551 + u8 act_state; /* from XPC HB viewpoint */ 552 + u8 remote_vars_version; /* version# of partition's vars */ 553 + enum xpc_retval reason; /* reason partition is deactivating */ 554 + int reason_line; /* line# deactivation initiated from */ 555 + int reactivate_nasid; /* nasid in partition to reactivate */ 530 556 531 - unsigned long disengage_request_timeout; /* timeout in jiffies */ 557 + unsigned long disengage_request_timeout; /* timeout in jiffies */ 532 558 struct timer_list disengage_request_timer; 533 - 534 559 535 560 /* XPC infrastructure referencing and teardown control */ 536 561 537 562 volatile u8 setup_state; /* infrastructure setup state */ 538 563 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 539 - atomic_t references; /* #of references to infrastructure */ 540 - 564 + atomic_t references; /* #of references to infrastructure */ 541 565 542 566 /* 543 567 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN ··· 544 572 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 
545 573 */ 546 574 575 + u8 nchannels; /* #of defined channels supported */ 576 + atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 577 + atomic_t nchannels_engaged; /* #of channels engaged with remote part */ 578 + struct xpc_channel *channels; /* array of channel structures */ 547 579 548 - u8 nchannels; /* #of defined channels supported */ 549 - atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 550 - atomic_t nchannels_engaged;/* #of channels engaged with remote part */ 551 - struct xpc_channel *channels;/* array of channel structures */ 552 - 553 - void *local_GPs_base; /* base address of kmalloc'd space */ 554 - struct xpc_gp *local_GPs; /* local Get/Put values */ 555 - void *remote_GPs_base; /* base address of kmalloc'd space */ 556 - struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ 557 - /* values */ 558 - u64 remote_GPs_pa; /* phys address of remote partition's local */ 559 - /* Get/Put values */ 560 - 580 + void *local_GPs_base; /* base address of kmalloc'd space */ 581 + struct xpc_gp *local_GPs; /* local Get/Put values */ 582 + void *remote_GPs_base; /* base address of kmalloc'd space */ 583 + struct xpc_gp *remote_GPs; /* copy of remote partition's local Get/Put */ 584 + /* values */ 585 + u64 remote_GPs_pa; /* phys address of remote partition's local */ 586 + /* Get/Put values */ 561 587 562 588 /* fields used to pass args when opening or closing a channel */ 563 589 564 - void *local_openclose_args_base; /* base address of kmalloc'd space */ 565 - struct xpc_openclose_args *local_openclose_args; /* local's args */ 566 - void *remote_openclose_args_base; /* base address of kmalloc'd space */ 567 - struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 568 - /* args */ 569 - u64 remote_openclose_args_pa; /* phys addr of remote's args */ 570 - 590 + void *local_openclose_args_base; /* base address of kmalloc'd space */ 591 + struct xpc_openclose_args 
*local_openclose_args; /* local's args */ 592 + void *remote_openclose_args_base; /* base address of kmalloc'd space */ 593 + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 594 + /* args */ 595 + u64 remote_openclose_args_pa; /* phys addr of remote's args */ 571 596 572 597 /* IPI sending, receiving and handling related fields */ 573 598 574 - int remote_IPI_nasid; /* nasid of where to send IPIs */ 575 - int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ 576 - AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ 599 + int remote_IPI_nasid; /* nasid of where to send IPIs */ 600 + int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ 601 + AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ 577 602 578 - AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ 579 - u64 local_IPI_amo; /* IPI amo flags yet to be handled */ 580 - char IPI_owner[8]; /* IPI owner's name */ 581 - struct timer_list dropped_IPI_timer; /* dropped IPI timer */ 603 + AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ 604 + u64 local_IPI_amo; /* IPI amo flags yet to be handled */ 605 + char IPI_owner[8]; /* IPI owner's name */ 606 + struct timer_list dropped_IPI_timer; /* dropped IPI timer */ 582 607 583 - spinlock_t IPI_lock; /* IPI handler lock */ 584 - 608 + spinlock_t IPI_lock; /* IPI handler lock */ 585 609 586 610 /* channel manager related fields */ 587 611 588 612 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 589 - wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 613 + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 590 614 591 615 } ____cacheline_aligned; 592 - 593 616 594 617 /* struct xpc_partition act_state values (for XPC HB) */ 595 618 ··· 594 627 #define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 595 628 #define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 596 629 597 - 598 630 #define 
XPC_DEACTIVATE_PARTITION(_p, _reason) \ 599 631 xpc_deactivate_partition(__LINE__, (_p), (_reason)) 600 - 601 632 602 633 /* struct xpc_partition setup_state values */ 603 634 ··· 604 639 #define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 605 640 #define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 606 641 607 - 608 - 609 642 /* 610 643 * struct xpc_partition IPI_timer #of seconds to wait before checking for 611 644 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until ··· 611 648 */ 612 649 #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 613 650 614 - 615 651 /* number of seconds to wait for other partitions to disengage */ 616 652 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 617 653 618 654 /* interval in seconds to print 'waiting disengagement' messages */ 619 655 #define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 620 656 621 - 622 657 #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) 623 - 624 - 625 658 626 659 /* found in xp_main.c */ 627 660 extern struct xpc_registration xpc_registrations[]; 628 - 629 661 630 662 /* found in xpc_main.c */ 631 663 extern struct device *xpc_part; ··· 633 675 extern void xpc_activate_kthreads(struct xpc_channel *, int); 634 676 extern void xpc_create_kthreads(struct xpc_channel *, int, int); 635 677 extern void xpc_disconnect_wait(int); 636 - 637 678 638 679 /* found in xpc_partition.c */ 639 680 extern int xpc_exiting; ··· 653 696 extern void xpc_discovery(void); 654 697 extern void xpc_check_remote_hb(void); 655 698 extern void xpc_deactivate_partition(const int, struct xpc_partition *, 656 - enum xpc_retval); 699 + enum xpc_retval); 657 700 extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); 658 - 659 701 660 702 /* found in xpc_channel.c */ 661 703 extern void xpc_initiate_connect(int); ··· 670 714 extern void xpc_connected_callout(struct xpc_channel *); 671 715 extern void xpc_deliver_msg(struct xpc_channel *); 672 716 extern void 
xpc_disconnect_channel(const int, struct xpc_channel *, 673 - enum xpc_retval, unsigned long *); 717 + enum xpc_retval, unsigned long *); 674 718 extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); 675 719 extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); 676 720 extern void xpc_teardown_infrastructure(struct xpc_partition *); 677 - 678 - 679 721 680 722 static inline void 681 723 xpc_wakeup_channel_mgr(struct xpc_partition *part) ··· 682 728 wake_up(&part->channel_mgr_wq); 683 729 } 684 730 } 685 - 686 - 687 731 688 732 /* 689 733 * These next two inlines are used to keep us from tearing down a channel's ··· 704 752 } 705 753 } 706 754 707 - 708 - 709 755 #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ 710 756 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) 711 - 712 757 713 758 /* 714 759 * These two inlines are used to keep us from tearing down a partition's ··· 715 766 xpc_part_deref(struct xpc_partition *part) 716 767 { 717 768 s32 refs = atomic_dec_return(&part->references); 718 - 719 769 720 770 DBUG_ON(refs < 0); 721 771 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { ··· 727 779 { 728 780 int setup; 729 781 730 - 731 782 atomic_inc(&part->references); 732 783 setup = (part->setup_state == XPC_P_SETUP); 733 784 if (!setup) { ··· 734 787 } 735 788 return setup; 736 789 } 737 - 738 - 739 790 740 791 /* 741 792 * The following macro is to be used for the setting of the reason and ··· 746 801 (_p)->reason_line = _line; \ 747 802 } 748 803 749 - 750 - 751 804 /* 752 805 * This next set of inlines are used to keep track of when a partition is 753 806 * potentially engaged in accessing memory belonging to another partition. 
··· 755 812 xpc_mark_partition_engaged(struct xpc_partition *part) 756 813 { 757 814 unsigned long irq_flags; 758 - AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 759 - (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 760 - 815 + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 816 + (XPC_ENGAGED_PARTITIONS_AMO * 817 + sizeof(AMO_t))); 761 818 762 819 local_irq_save(irq_flags); 763 820 764 821 /* set bit corresponding to our partid in remote partition's AMO */ 765 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 766 - (1UL << sn_partition_id)); 822 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, 823 + (1UL << sn_partition_id)); 767 824 /* 768 825 * We must always use the nofault function regardless of whether we 769 826 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 770 827 * didn't, we'd never know that the other partition is down and would 771 828 * keep sending IPIs and AMOs to it until the heartbeat times out. 772 829 */ 773 - (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 774 - variable), xp_nofault_PIOR_target)); 830 + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 831 + variable), 832 + xp_nofault_PIOR_target)); 775 833 776 834 local_irq_restore(irq_flags); 777 835 } ··· 781 837 xpc_mark_partition_disengaged(struct xpc_partition *part) 782 838 { 783 839 unsigned long irq_flags; 784 - AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 785 - (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 786 - 840 + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 841 + (XPC_ENGAGED_PARTITIONS_AMO * 842 + sizeof(AMO_t))); 787 843 788 844 local_irq_save(irq_flags); 789 845 790 846 /* clear bit corresponding to our partid in remote partition's AMO */ 791 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 792 - ~(1UL << sn_partition_id)); 847 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 848 + ~(1UL << sn_partition_id)); 793 849 /* 794 850 * We must 
always use the nofault function regardless of whether we 795 851 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 796 852 * didn't, we'd never know that the other partition is down and would 797 853 * keep sending IPIs and AMOs to it until the heartbeat times out. 798 854 */ 799 - (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 800 - variable), xp_nofault_PIOR_target)); 855 + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 856 + variable), 857 + xp_nofault_PIOR_target)); 801 858 802 859 local_irq_restore(irq_flags); 803 860 } ··· 807 862 xpc_request_partition_disengage(struct xpc_partition *part) 808 863 { 809 864 unsigned long irq_flags; 810 - AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 811 - (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 812 - 865 + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 866 + (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 813 867 814 868 local_irq_save(irq_flags); 815 869 816 870 /* set bit corresponding to our partid in remote partition's AMO */ 817 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 818 - (1UL << sn_partition_id)); 871 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, 872 + (1UL << sn_partition_id)); 819 873 /* 820 874 * We must always use the nofault function regardless of whether we 821 875 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 822 876 * didn't, we'd never know that the other partition is down and would 823 877 * keep sending IPIs and AMOs to it until the heartbeat times out. 
824 878 */ 825 - (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 826 - variable), xp_nofault_PIOR_target)); 879 + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 880 + variable), 881 + xp_nofault_PIOR_target)); 827 882 828 883 local_irq_restore(irq_flags); 829 884 } ··· 832 887 xpc_cancel_partition_disengage_request(struct xpc_partition *part) 833 888 { 834 889 unsigned long irq_flags; 835 - AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 836 - (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 837 - 890 + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 891 + (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 838 892 839 893 local_irq_save(irq_flags); 840 894 841 895 /* clear bit corresponding to our partid in remote partition's AMO */ 842 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 843 - ~(1UL << sn_partition_id)); 896 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 897 + ~(1UL << sn_partition_id)); 844 898 /* 845 899 * We must always use the nofault function regardless of whether we 846 900 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 847 901 * didn't, we'd never know that the other partition is down and would 848 902 * keep sending IPIs and AMOs to it until the heartbeat times out. 
849 903 */ 850 - (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 851 - variable), xp_nofault_PIOR_target)); 904 + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 905 + variable), 906 + xp_nofault_PIOR_target)); 852 907 853 908 local_irq_restore(irq_flags); 854 909 } ··· 858 913 { 859 914 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 860 915 861 - 862 916 /* return our partition's AMO variable ANDed with partid_mask */ 863 - return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 864 - partid_mask); 917 + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & 918 + partid_mask); 865 919 } 866 920 867 921 static inline u64 ··· 868 924 { 869 925 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 870 926 871 - 872 927 /* return our partition's AMO variable ANDed with partid_mask */ 873 - return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 874 - partid_mask); 928 + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & 929 + partid_mask); 875 930 } 876 931 877 932 static inline void ··· 878 935 { 879 936 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 880 937 881 - 882 938 /* clear bit(s) based on partid_mask in our partition's AMO */ 883 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 884 - ~partid_mask); 939 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 940 + ~partid_mask); 885 941 } 886 942 887 943 static inline void ··· 888 946 { 889 947 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 890 948 891 - 892 949 /* clear bit(s) based on partid_mask in our partition's AMO */ 893 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 894 - ~partid_mask); 950 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 951 + ~partid_mask); 895 952 } 896 - 897 - 898 953 899 954 /* 900 955 * The following set of macros and inlines are used for the sending and ··· 903 964 static inline u64 904 965 
xpc_IPI_receive(AMO_t *amo) 905 966 { 906 - return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); 967 + return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); 907 968 } 908 - 909 969 910 970 static inline enum xpc_retval 911 971 xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) ··· 912 974 int ret = 0; 913 975 unsigned long irq_flags; 914 976 915 - 916 977 local_irq_save(irq_flags); 917 978 918 - FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); 979 + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); 919 980 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); 920 981 921 982 /* ··· 923 986 * didn't, we'd never know that the other partition is down and would 924 987 * keep sending IPIs and AMOs to it until the heartbeat times out. 925 988 */ 926 - ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 927 - xp_nofault_PIOR_target)); 989 + ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 990 + xp_nofault_PIOR_target)); 928 991 929 992 local_irq_restore(irq_flags); 930 993 931 994 return ((ret == 0) ? xpcSuccess : xpcPioReadError); 932 995 } 933 - 934 996 935 997 /* 936 998 * IPIs associated with SGI_XPC_ACTIVATE IRQ. 
··· 940 1004 */ 941 1005 static inline void 942 1006 xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, 943 - int to_phys_cpuid) 1007 + int to_phys_cpuid) 944 1008 { 945 1009 int w_index = XPC_NASID_W_INDEX(from_nasid); 946 1010 int b_index = XPC_NASID_B_INDEX(from_nasid); 947 - AMO_t *amos = (AMO_t *) __va(amos_page_pa + 948 - (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); 1011 + AMO_t *amos = (AMO_t *)__va(amos_page_pa + 1012 + (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); 949 1013 950 - 951 - (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, 952 - to_phys_cpuid, SGI_XPC_ACTIVATE); 1014 + (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, 1015 + to_phys_cpuid, SGI_XPC_ACTIVATE); 953 1016 } 954 1017 955 1018 static inline void 956 1019 xpc_IPI_send_activate(struct xpc_vars *vars) 957 1020 { 958 1021 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), 959 - vars->act_nasid, vars->act_phys_cpuid); 1022 + vars->act_nasid, vars->act_phys_cpuid); 960 1023 } 961 1024 962 1025 static inline void 963 1026 xpc_IPI_send_activated(struct xpc_partition *part) 964 1027 { 965 1028 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 966 - part->remote_act_nasid, part->remote_act_phys_cpuid); 1029 + part->remote_act_nasid, 1030 + part->remote_act_phys_cpuid); 967 1031 } 968 1032 969 1033 static inline void 970 1034 xpc_IPI_send_reactivate(struct xpc_partition *part) 971 1035 { 972 1036 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, 973 - xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); 1037 + xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); 974 1038 } 975 1039 976 1040 static inline void 977 1041 xpc_IPI_send_disengage(struct xpc_partition *part) 978 1042 { 979 1043 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 980 - part->remote_act_nasid, part->remote_act_phys_cpuid); 1044 + part->remote_act_nasid, 1045 + part->remote_act_phys_cpuid); 981 1046 } 982 - 983 1047 984 1048 
/* 985 1049 * IPIs associated with SGI_XPC_NOTIFY IRQ. ··· 994 1058 995 1059 static inline void 996 1060 xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, 997 - unsigned long *irq_flags) 1061 + unsigned long *irq_flags) 998 1062 { 999 1063 struct xpc_partition *part = &xpc_partitions[ch->partid]; 1000 1064 enum xpc_retval ret; 1001 1065 1002 - 1003 1066 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 1004 1067 ret = xpc_IPI_send(part->remote_IPI_amo_va, 1005 - (u64) ipi_flag << (ch->number * 8), 1006 - part->remote_IPI_nasid, 1007 - part->remote_IPI_phys_cpuid, 1008 - SGI_XPC_NOTIFY); 1068 + (u64)ipi_flag << (ch->number * 8), 1069 + part->remote_IPI_nasid, 1070 + part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); 1009 1071 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 1010 1072 ipi_flag_string, ch->partid, ch->number, ret); 1011 1073 if (unlikely(ret != xpcSuccess)) { ··· 1018 1084 } 1019 1085 } 1020 1086 1021 - 1022 1087 /* 1023 1088 * Make it look like the remote partition, which is associated with the 1024 1089 * specified channel, sent us an IPI. 
This faked IPI will be handled ··· 1028 1095 1029 1096 static inline void 1030 1097 xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, 1031 - char *ipi_flag_string) 1098 + char *ipi_flag_string) 1032 1099 { 1033 1100 struct xpc_partition *part = &xpc_partitions[ch->partid]; 1034 1101 1035 - 1036 - FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), 1037 - FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); 1102 + FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable), 1103 + FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); 1038 1104 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 1039 1105 ipi_flag_string, ch->partid, ch->number); 1040 1106 } 1041 - 1042 1107 1043 1108 /* 1044 1109 * The sending and receiving of IPIs includes the setting of an AMO variable ··· 1052 1121 #define XPC_IPI_OPENREPLY 0x08 1053 1122 #define XPC_IPI_MSGREQUEST 0x10 1054 1123 1055 - 1056 1124 /* given an AMO variable and a channel#, get its associated IPI flags */ 1057 1125 #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 1058 1126 #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) ··· 1059 1129 #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) 1060 1130 #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) 1061 1131 1062 - 1063 1132 static inline void 1064 1133 xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) 1065 1134 { 1066 1135 struct xpc_openclose_args *args = ch->local_openclose_args; 1067 - 1068 1136 1069 1137 args->reason = ch->reason; 1070 1138 ··· 1080 1152 { 1081 1153 struct xpc_openclose_args *args = ch->local_openclose_args; 1082 1154 1083 - 1084 1155 args->msg_size = ch->msg_size; 1085 1156 args->local_nentries = ch->local_nentries; 1086 1157 ··· 1090 1163 xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) 1091 1164 { 1092 1165 struct 
xpc_openclose_args *args = ch->local_openclose_args; 1093 - 1094 1166 1095 1167 args->remote_nentries = ch->remote_nentries; 1096 1168 args->local_nentries = ch->local_nentries; ··· 1110 1184 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); 1111 1185 } 1112 1186 1113 - 1114 1187 /* 1115 1188 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These 1116 1189 * pages are located in the lowest granule. The lowest granule uses 4k pages ··· 1126 1201 { 1127 1202 AMO_t *amo = xpc_vars->amos_page + index; 1128 1203 1129 - 1130 - (void) xpc_IPI_receive(amo); /* clear AMO variable */ 1204 + (void)xpc_IPI_receive(amo); /* clear AMO variable */ 1131 1205 return amo; 1132 1206 } 1133 - 1134 - 1135 1207 1136 1208 static inline enum xpc_retval 1137 1209 xpc_map_bte_errors(bte_result_t error) ··· 1142 1220 return xpcBteUnmappedError; 1143 1221 } 1144 1222 switch (error) { 1145 - case BTE_SUCCESS: return xpcSuccess; 1146 - case BTEFAIL_DIR: return xpcBteDirectoryError; 1147 - case BTEFAIL_POISON: return xpcBtePoisonError; 1148 - case BTEFAIL_WERR: return xpcBteWriteError; 1149 - case BTEFAIL_ACCESS: return xpcBteAccessError; 1150 - case BTEFAIL_PWERR: return xpcBtePWriteError; 1151 - case BTEFAIL_PRERR: return xpcBtePReadError; 1152 - case BTEFAIL_TOUT: return xpcBteTimeOutError; 1153 - case BTEFAIL_XTERR: return xpcBteXtalkError; 1154 - case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; 1155 - default: return xpcBteUnmappedError; 1223 + case BTE_SUCCESS: 1224 + return xpcSuccess; 1225 + case BTEFAIL_DIR: 1226 + return xpcBteDirectoryError; 1227 + case BTEFAIL_POISON: 1228 + return xpcBtePoisonError; 1229 + case BTEFAIL_WERR: 1230 + return xpcBteWriteError; 1231 + case BTEFAIL_ACCESS: 1232 + return xpcBteAccessError; 1233 + case BTEFAIL_PWERR: 1234 + return xpcBtePWriteError; 1235 + case BTEFAIL_PRERR: 1236 + return xpcBtePReadError; 1237 + case BTEFAIL_TOUT: 1238 + return xpcBteTimeOutError; 1239 + case BTEFAIL_XTERR: 1240 + return xpcBteXtalkError; 1241 + case 
BTEFAIL_NOTAVAIL: 1242 + return xpcBteNotAvailable; 1243 + default: 1244 + return xpcBteUnmappedError; 1156 1245 } 1157 1246 } 1158 - 1159 - 1160 1247 1161 1248 /* 1162 1249 * Check to see if there is any channel activity to/from the specified ··· 1176 1245 { 1177 1246 u64 IPI_amo; 1178 1247 unsigned long irq_flags; 1179 - 1180 1248 1181 1249 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); 1182 1250 if (IPI_amo == 0) { ··· 1192 1262 xpc_wakeup_channel_mgr(part); 1193 1263 } 1194 1264 1195 - 1196 1265 #endif /* _DRIVERS_MISC_SGIXP_XPC_H */ 1197 -
+138 -190
drivers/misc/sgi-xp/xpc_partition.c
··· 6 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 7 7 */ 8 8 9 - 10 9 /* 11 10 * Cross Partition Communication (XPC) partition support. 12 11 * ··· 14 15 * heartbeats of other partitions. 15 16 * 16 17 */ 17 - 18 18 19 19 #include <linux/kernel.h> 20 20 #include <linux/sysctl.h> ··· 28 30 #include <asm/sn/addrs.h> 29 31 #include "xpc.h" 30 32 31 - 32 33 /* XPC is exiting flag */ 33 34 int xpc_exiting; 34 - 35 35 36 36 /* SH_IPI_ACCESS shub register value on startup */ 37 37 static u64 xpc_sh1_IPI_access; ··· 38 42 static u64 xpc_sh2_IPI_access2; 39 43 static u64 xpc_sh2_IPI_access3; 40 44 41 - 42 45 /* original protection values for each node */ 43 46 u64 xpc_prot_vec[MAX_NUMNODES]; 44 - 45 47 46 48 /* this partition's reserved page pointers */ 47 49 struct xpc_rsvd_page *xpc_rsvd_page; ··· 51 57 static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ 52 58 static int xp_nasid_mask_words; /* actual size in words of nasid mask */ 53 59 54 - 55 60 /* 56 61 * For performance reasons, each entry of xpc_partitions[] is cacheline 57 62 * aligned. And xpc_partitions[] is padded with an additional entry at the ··· 59 66 */ 60 67 struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 61 68 62 - 63 69 /* 64 70 * Generic buffer used to store a local copy of portions of a remote 65 71 * partition's reserved page (either its header and part_nasids mask, ··· 66 74 */ 67 75 char *xpc_remote_copy_buffer; 68 76 void *xpc_remote_copy_buffer_base; 69 - 70 77 71 78 /* 72 79 * Guarantee that the kmalloc'd memory is cacheline aligned. 
··· 78 87 if (*base == NULL) { 79 88 return NULL; 80 89 } 81 - if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 90 + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) { 82 91 return *base; 83 92 } 84 93 kfree(*base); ··· 88 97 if (*base == NULL) { 89 98 return NULL; 90 99 } 91 - return (void *) L1_CACHE_ALIGN((u64) *base); 100 + return (void *)L1_CACHE_ALIGN((u64)*base); 92 101 } 93 - 94 102 95 103 /* 96 104 * Given a nasid, get the physical address of the partition's reserved page ··· 107 117 u64 buf_len = 0; 108 118 void *buf_base = NULL; 109 119 110 - 111 120 while (1) { 112 121 113 122 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, 114 - &len); 123 + &len); 115 124 116 125 dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" 117 126 "0x%016lx, address=0x%016lx, len=0x%016lx\n", ··· 123 134 if (L1_CACHE_ALIGN(len) > buf_len) { 124 135 kfree(buf_base); 125 136 buf_len = L1_CACHE_ALIGN(len); 126 - buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len, 127 - GFP_KERNEL, &buf_base); 137 + buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, 138 + GFP_KERNEL, 139 + &buf_base); 128 140 if (buf_base == NULL) { 129 141 dev_err(xpc_part, "unable to kmalloc " 130 142 "len=0x%016lx\n", buf_len); ··· 135 145 } 136 146 137 147 bte_res = xp_bte_copy(rp_pa, buf, buf_len, 138 - (BTE_NOTIFY | BTE_WACQUIRE), NULL); 148 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 139 149 if (bte_res != BTE_SUCCESS) { 140 150 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); 141 151 status = SALRET_ERROR; ··· 152 162 return rp_pa; 153 163 } 154 164 155 - 156 165 /* 157 166 * Fill the partition reserved page with the information needed by 158 167 * other partitions to discover we are alive and establish initial ··· 165 176 u64 rp_pa, nasid_array = 0; 166 177 int i, ret; 167 178 168 - 169 179 /* get the local reserved page's address */ 170 180 171 181 preempt_disable(); ··· 174 186 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 175 187 return NULL; 176 188 } 177 - rp = (struct 
xpc_rsvd_page *) __va(rp_pa); 189 + rp = (struct xpc_rsvd_page *)__va(rp_pa); 178 190 179 191 if (rp->partid != sn_partition_id) { 180 192 dev_err(xpc_part, "the reserved page's partid of %d should be " ··· 211 223 * memory protections are never restricted. 212 224 */ 213 225 if ((amos_page = xpc_vars->amos_page) == NULL) { 214 - amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0)); 226 + amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0)); 215 227 if (amos_page == NULL) { 216 228 dev_err(xpc_part, "can't allocate page of AMOs\n"); 217 229 return NULL; ··· 222 234 * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 223 235 */ 224 236 if (!enable_shub_wars_1_1()) { 225 - ret = sn_change_memprotect(ia64_tpa((u64) amos_page), 226 - PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, 227 - &nasid_array); 237 + ret = sn_change_memprotect(ia64_tpa((u64)amos_page), 238 + PAGE_SIZE, 239 + SN_MEMPROT_ACCESS_CLASS_1, 240 + &nasid_array); 228 241 if (ret != 0) { 229 242 dev_err(xpc_part, "can't change memory " 230 243 "protections\n"); 231 244 uncached_free_page(__IA64_UNCACHED_OFFSET | 232 - TO_PHYS((u64) amos_page)); 245 + TO_PHYS((u64)amos_page)); 233 246 return NULL; 234 247 } 235 248 } 236 - } else if (!IS_AMO_ADDRESS((u64) amos_page)) { 249 + } else if (!IS_AMO_ADDRESS((u64)amos_page)) { 237 250 /* 238 251 * EFI's XPBOOT can also set amos_page in the reserved page, 239 252 * but it happens to leave it as an uncached physical address 240 253 * and we need it to be an uncached virtual, so we'll have to 241 254 * convert it. 
242 255 */ 243 - if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { 256 + if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) { 244 257 dev_err(xpc_part, "previously used amos_page address " 245 - "is bad = 0x%p\n", (void *) amos_page); 258 + "is bad = 0x%p\n", (void *)amos_page); 246 259 return NULL; 247 260 } 248 - amos_page = (AMO_t *) TO_AMO((u64) amos_page); 261 + amos_page = (AMO_t *)TO_AMO((u64)amos_page); 249 262 } 250 263 251 264 /* clear xpc_vars */ ··· 256 267 xpc_vars->act_nasid = cpuid_to_nasid(0); 257 268 xpc_vars->act_phys_cpuid = cpu_physical_id(0); 258 269 xpc_vars->vars_part_pa = __pa(xpc_vars_part); 259 - xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page); 260 - xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 261 - 270 + xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); 271 + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 262 272 263 273 /* clear xpc_vars_part */ 264 - memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) * 265 - XP_MAX_PARTITIONS); 274 + memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * 275 + XP_MAX_PARTITIONS); 266 276 267 277 /* initialize the activate IRQ related AMO variables */ 268 278 for (i = 0; i < xp_nasid_mask_words; i++) { 269 - (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); 279 + (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); 270 280 } 271 281 272 282 /* initialize the engaged remote partitions related AMO variables */ 273 - (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); 274 - (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); 283 + (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); 284 + (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); 275 285 276 286 /* timestamp of when reserved page was setup by XPC */ 277 287 rp->stamp = CURRENT_TIME; ··· 284 296 return rp; 285 297 } 286 298 287 - 288 299 /* 289 300 * Change protections to allow IPI operations (and AMO operations on 290 301 * Shub 1.1 systems). 
··· 294 307 int node; 295 308 int nasid; 296 309 297 - 298 310 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 299 311 300 312 if (is_shub2()) { 301 313 xpc_sh2_IPI_access0 = 302 - (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); 314 + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); 303 315 xpc_sh2_IPI_access1 = 304 - (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); 316 + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); 305 317 xpc_sh2_IPI_access2 = 306 - (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); 318 + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); 307 319 xpc_sh2_IPI_access3 = 308 - (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); 320 + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); 309 321 310 322 for_each_online_node(node) { 311 323 nasid = cnodeid_to_nasid(node); 312 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 313 - -1UL); 314 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 315 - -1UL); 316 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 317 - -1UL); 318 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 319 - -1UL); 324 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 325 + -1UL); 326 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 327 + -1UL); 328 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 329 + -1UL); 330 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 331 + -1UL); 320 332 } 321 333 322 334 } else { 323 335 xpc_sh1_IPI_access = 324 - (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); 336 + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); 325 337 326 338 for_each_online_node(node) { 327 339 nasid = cnodeid_to_nasid(node); 328 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 329 - -1UL); 340 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 341 + -1UL); 330 342 331 343 /* 332 344 * Since the BIST collides with memory operations on ··· 333 347 */ 334 348 if (enable_shub_wars_1_1()) { 335 349 /* open up 
everything */ 336 - xpc_prot_vec[node] = (u64) HUB_L((u64 *) 337 - GLOBAL_MMR_ADDR(nasid, 338 - SH1_MD_DQLP_MMR_DIR_PRIVEC0)); 339 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 340 - SH1_MD_DQLP_MMR_DIR_PRIVEC0), 341 - -1UL); 342 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 343 - SH1_MD_DQRP_MMR_DIR_PRIVEC0), 344 - -1UL); 350 + xpc_prot_vec[node] = (u64)HUB_L((u64 *) 351 + GLOBAL_MMR_ADDR 352 + (nasid, 353 + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); 354 + HUB_S((u64 *) 355 + GLOBAL_MMR_ADDR(nasid, 356 + SH1_MD_DQLP_MMR_DIR_PRIVEC0), 357 + -1UL); 358 + HUB_S((u64 *) 359 + GLOBAL_MMR_ADDR(nasid, 360 + SH1_MD_DQRP_MMR_DIR_PRIVEC0), 361 + -1UL); 345 362 } 346 363 } 347 364 } 348 365 } 349 - 350 366 351 367 /* 352 368 * Restrict protections to disallow IPI operations (and AMO operations on ··· 360 372 int node; 361 373 int nasid; 362 374 363 - 364 375 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 365 376 366 377 if (is_shub2()) { 367 378 368 379 for_each_online_node(node) { 369 380 nasid = cnodeid_to_nasid(node); 370 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 371 - xpc_sh2_IPI_access0); 372 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 373 - xpc_sh2_IPI_access1); 374 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 375 - xpc_sh2_IPI_access2); 376 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 377 - xpc_sh2_IPI_access3); 381 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 382 + xpc_sh2_IPI_access0); 383 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 384 + xpc_sh2_IPI_access1); 385 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 386 + xpc_sh2_IPI_access2); 387 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 388 + xpc_sh2_IPI_access3); 378 389 } 379 390 380 391 } else { 381 392 382 393 for_each_online_node(node) { 383 394 nasid = cnodeid_to_nasid(node); 384 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 385 - xpc_sh1_IPI_access); 395 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 
396 + xpc_sh1_IPI_access); 386 397 387 398 if (enable_shub_wars_1_1()) { 388 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 389 - SH1_MD_DQLP_MMR_DIR_PRIVEC0), 390 - xpc_prot_vec[node]); 391 - HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 392 - SH1_MD_DQRP_MMR_DIR_PRIVEC0), 393 - xpc_prot_vec[node]); 399 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, 400 + SH1_MD_DQLP_MMR_DIR_PRIVEC0), 401 + xpc_prot_vec[node]); 402 + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, 403 + SH1_MD_DQRP_MMR_DIR_PRIVEC0), 404 + xpc_prot_vec[node]); 394 405 } 395 406 } 396 407 } 397 408 } 398 - 399 409 400 410 /* 401 411 * At periodic intervals, scan through all active partitions and ensure ··· 407 421 partid_t partid; 408 422 bte_result_t bres; 409 423 410 - 411 - remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 424 + remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; 412 425 413 426 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 414 427 ··· 422 437 part = &xpc_partitions[partid]; 423 438 424 439 if (part->act_state == XPC_P_INACTIVE || 425 - part->act_state == XPC_P_DEACTIVATING) { 440 + part->act_state == XPC_P_DEACTIVATING) { 426 441 continue; 427 442 } 428 443 429 444 /* pull the remote_hb cache line */ 430 445 bres = xp_bte_copy(part->remote_vars_pa, 431 - (u64) remote_vars, 432 - XPC_RP_VARS_SIZE, 433 - (BTE_NOTIFY | BTE_WACQUIRE), NULL); 446 + (u64)remote_vars, 447 + XPC_RP_VARS_SIZE, 448 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 434 449 if (bres != BTE_SUCCESS) { 435 450 XPC_DEACTIVATE_PARTITION(part, 436 - xpc_map_bte_errors(bres)); 451 + xpc_map_bte_errors(bres)); 437 452 continue; 438 453 } 439 454 ··· 444 459 remote_vars->heartbeating_to_mask); 445 460 446 461 if (((remote_vars->heartbeat == part->last_heartbeat) && 447 - (remote_vars->heartbeat_offline == 0)) || 448 - !xpc_hb_allowed(sn_partition_id, remote_vars)) { 462 + (remote_vars->heartbeat_offline == 0)) || 463 + !xpc_hb_allowed(sn_partition_id, remote_vars)) { 449 464 450 465 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); 451 466 
continue; ··· 454 469 part->last_heartbeat = remote_vars->heartbeat; 455 470 } 456 471 } 457 - 458 472 459 473 /* 460 474 * Get a copy of a portion of the remote partition's rsvd page. ··· 464 480 */ 465 481 static enum xpc_retval 466 482 xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 467 - struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 483 + struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 468 484 { 469 485 int bres, i; 470 - 471 486 472 487 /* get the reserved page's physical address */ 473 488 ··· 475 492 return xpcNoRsvdPageAddr; 476 493 } 477 494 478 - 479 495 /* pull over the reserved page header and part_nasids mask */ 480 - bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, 481 - XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 482 - (BTE_NOTIFY | BTE_WACQUIRE), NULL); 496 + bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, 497 + XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 498 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 483 499 if (bres != BTE_SUCCESS) { 484 500 return xpc_map_bte_errors(bres); 485 501 } 486 502 487 - 488 503 if (discovered_nasids != NULL) { 489 504 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); 490 - 491 505 492 506 for (i = 0; i < xp_nasid_mask_words; i++) { 493 507 discovered_nasids[i] |= remote_part_nasids[i]; 494 508 } 495 509 } 496 510 497 - 498 511 /* check that the partid is for another partition */ 499 512 500 513 if (remote_rp->partid < 1 || 501 - remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 514 + remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 502 515 return xpcInvalidPartid; 503 516 } 504 517 ··· 502 523 return xpcLocalPartid; 503 524 } 504 525 505 - 506 526 if (XPC_VERSION_MAJOR(remote_rp->version) != 507 - XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 527 + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 508 528 return xpcBadVersion; 509 529 } 510 530 511 531 return xpcSuccess; 512 532 } 513 - 514 533 515 534 /* 516 535 * Get a copy of the remote partition's XPC variables from the reserved page. 
··· 521 544 { 522 545 int bres; 523 546 524 - 525 547 if (remote_vars_pa == 0) { 526 548 return xpcVarsNotSet; 527 549 } 528 550 529 551 /* pull over the cross partition variables */ 530 - bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, 531 - (BTE_NOTIFY | BTE_WACQUIRE), NULL); 552 + bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, 553 + (BTE_NOTIFY | BTE_WACQUIRE), NULL); 532 554 if (bres != BTE_SUCCESS) { 533 555 return xpc_map_bte_errors(bres); 534 556 } 535 557 536 558 if (XPC_VERSION_MAJOR(remote_vars->version) != 537 - XPC_VERSION_MAJOR(XPC_V_VERSION)) { 559 + XPC_VERSION_MAJOR(XPC_V_VERSION)) { 538 560 return xpcBadVersion; 539 561 } 540 562 541 563 return xpcSuccess; 542 564 } 543 565 544 - 545 566 /* 546 567 * Update the remote partition's info. 547 568 */ 548 569 static void 549 570 xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, 550 - struct timespec *remote_rp_stamp, u64 remote_rp_pa, 551 - u64 remote_vars_pa, struct xpc_vars *remote_vars) 571 + struct timespec *remote_rp_stamp, u64 remote_rp_pa, 572 + u64 remote_vars_pa, struct xpc_vars *remote_vars) 552 573 { 553 574 part->remote_rp_version = remote_rp_version; 554 575 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", ··· 588 613 part->remote_vars_version); 589 614 } 590 615 591 - 592 616 /* 593 617 * Prior code has determined the nasid which generated an IPI. 
Inspect 594 618 * that nasid to determine if its partition needs to be activated or ··· 617 643 struct xpc_partition *part; 618 644 enum xpc_retval ret; 619 645 620 - 621 646 /* pull over the reserved page structure */ 622 647 623 - remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; 648 + remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; 624 649 625 650 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); 626 651 if (ret != xpcSuccess) { 627 652 dev_warn(xpc_part, "unable to get reserved page from nasid %d, " 628 - "which sent interrupt, reason=%d\n", nasid, ret); 653 + "which sent interrupt, reason=%d\n", nasid, ret); 629 654 return; 630 655 } 631 656 ··· 636 663 partid = remote_rp->partid; 637 664 part = &xpc_partitions[partid]; 638 665 639 - 640 666 /* pull over the cross partition variables */ 641 667 642 - remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 668 + remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; 643 669 644 670 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 645 671 if (ret != xpcSuccess) { 646 672 647 673 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 648 - "which sent interrupt, reason=%d\n", nasid, ret); 674 + "which sent interrupt, reason=%d\n", nasid, ret); 649 675 650 676 XPC_DEACTIVATE_PARTITION(part, ret); 651 677 return; 652 678 } 653 679 654 - 655 680 part->act_IRQ_rcvd++; 656 681 657 682 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 658 - "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, 683 + "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd, 659 684 remote_vars->heartbeat, remote_vars->heartbeating_to_mask); 660 685 661 - if (xpc_partition_disengaged(part) && 662 - part->act_state == XPC_P_INACTIVE) { 686 + if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) { 663 687 664 688 xpc_update_partition_info(part, remote_rp_version, 665 - &remote_rp_stamp, remote_rp_pa, 666 - remote_vars_pa, remote_vars); 689 
+ &remote_rp_stamp, remote_rp_pa, 690 + remote_vars_pa, remote_vars); 667 691 668 692 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 669 693 if (xpc_partition_disengage_requested(1UL << partid)) { ··· 684 714 685 715 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { 686 716 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 687 - remote_vars_version)); 717 + remote_vars_version)); 688 718 689 719 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { 690 720 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 691 - version)); 721 + version)); 692 722 /* see if the other side rebooted */ 693 723 if (part->remote_amos_page_pa == 694 - remote_vars->amos_page_pa && 695 - xpc_hb_allowed(sn_partition_id, 696 - remote_vars)) { 724 + remote_vars->amos_page_pa && 725 + xpc_hb_allowed(sn_partition_id, remote_vars)) { 697 726 /* doesn't look that way, so ignore the IPI */ 698 727 return; 699 728 } ··· 704 735 */ 705 736 706 737 xpc_update_partition_info(part, remote_rp_version, 707 - &remote_rp_stamp, remote_rp_pa, 708 - remote_vars_pa, remote_vars); 738 + &remote_rp_stamp, remote_rp_pa, 739 + remote_vars_pa, remote_vars); 709 740 part->reactivate_nasid = nasid; 710 741 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 711 742 return; ··· 725 756 xpc_clear_partition_disengage_request(1UL << partid); 726 757 727 758 xpc_update_partition_info(part, remote_rp_version, 728 - &remote_rp_stamp, remote_rp_pa, 729 - remote_vars_pa, remote_vars); 759 + &remote_rp_stamp, remote_rp_pa, 760 + remote_vars_pa, remote_vars); 730 761 reactivate = 1; 731 762 732 763 } else { 733 764 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); 734 765 735 766 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, 736 - &remote_rp_stamp); 767 + &remote_rp_stamp); 737 768 if (stamp_diff != 0) { 738 769 DBUG_ON(stamp_diff >= 0); 739 770 ··· 744 775 745 776 DBUG_ON(xpc_partition_engaged(1UL << partid)); 746 777 DBUG_ON(xpc_partition_disengage_requested(1UL << 747 - partid)); 778 + 
partid)); 748 779 749 780 xpc_update_partition_info(part, remote_rp_version, 750 - &remote_rp_stamp, remote_rp_pa, 751 - remote_vars_pa, remote_vars); 781 + &remote_rp_stamp, 782 + remote_rp_pa, remote_vars_pa, 783 + remote_vars); 752 784 reactivate = 1; 753 785 } 754 786 } 755 787 756 788 if (part->disengage_request_timeout > 0 && 757 - !xpc_partition_disengaged(part)) { 789 + !xpc_partition_disengaged(part)) { 758 790 /* still waiting on other side to disengage from us */ 759 791 return; 760 792 } ··· 765 795 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 766 796 767 797 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && 768 - xpc_partition_disengage_requested(1UL << partid)) { 798 + xpc_partition_disengage_requested(1UL << partid)) { 769 799 XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); 770 800 } 771 801 } 772 - 773 802 774 803 /* 775 804 * Loop through the activation AMO variables and process any bits ··· 782 813 { 783 814 int word, bit; 784 815 u64 nasid_mask; 785 - u64 nasid; /* remote nasid */ 816 + u64 nasid; /* remote nasid */ 786 817 int n_IRQs_detected = 0; 787 818 AMO_t *act_amos; 788 819 789 - 790 820 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; 791 - 792 821 793 822 /* scan through act AMO variable looking for non-zero entries */ 794 823 for (word = 0; word < xp_nasid_mask_words; word++) { ··· 804 837 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, 805 838 nasid_mask); 806 839 807 - 808 840 /* 809 841 * If this nasid has been added to the machine since 810 842 * our partition was reset, this will retain the ··· 811 845 * This is used in the event of module reload. 812 846 */ 813 847 xpc_mach_nasids[word] |= nasid_mask; 814 - 815 848 816 849 /* locate the nasid(s) which sent interrupts */ 817 850 ··· 827 862 return n_IRQs_detected; 828 863 } 829 864 830 - 831 865 /* 832 866 * See if the other side has responded to a partition disengage request 833 867 * from us. 
··· 836 872 { 837 873 partid_t partid = XPC_PARTID(part); 838 874 int disengaged; 839 - 840 875 841 876 disengaged = (xpc_partition_engaged(1UL << partid) == 0); 842 877 if (part->disengage_request_timeout) { ··· 851 888 */ 852 889 853 890 dev_info(xpc_part, "disengage from remote partition %d " 854 - "timed out\n", partid); 891 + "timed out\n", partid); 855 892 xpc_disengage_request_timedout = 1; 856 893 xpc_clear_partition_engaged(1UL << partid); 857 894 disengaged = 1; ··· 861 898 /* cancel the timer function, provided it's not us */ 862 899 if (!in_interrupt()) { 863 900 del_singleshot_timer_sync(&part-> 864 - disengage_request_timer); 901 + disengage_request_timer); 865 902 } 866 903 867 904 DBUG_ON(part->act_state != XPC_P_DEACTIVATING && 868 - part->act_state != XPC_P_INACTIVE); 905 + part->act_state != XPC_P_INACTIVE); 869 906 if (part->act_state != XPC_P_INACTIVE) { 870 907 xpc_wakeup_channel_mgr(part); 871 908 } ··· 877 914 return disengaged; 878 915 } 879 916 880 - 881 917 /* 882 918 * Mark specified partition as active. 883 919 */ ··· 885 923 { 886 924 unsigned long irq_flags; 887 925 enum xpc_retval ret; 888 - 889 926 890 927 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 891 928 ··· 901 940 return ret; 902 941 } 903 942 904 - 905 943 /* 906 944 * Notify XPC that the partition is down. 
907 945 */ 908 946 void 909 947 xpc_deactivate_partition(const int line, struct xpc_partition *part, 910 - enum xpc_retval reason) 948 + enum xpc_retval reason) 911 949 { 912 950 unsigned long irq_flags; 913 - 914 951 915 952 spin_lock_irqsave(&part->act_lock, irq_flags); 916 953 ··· 923 964 } 924 965 if (part->act_state == XPC_P_DEACTIVATING) { 925 966 if ((part->reason == xpcUnloading && reason != xpcUnloading) || 926 - reason == xpcReactivating) { 967 + reason == xpcReactivating) { 927 968 XPC_SET_REASON(part, reason, line); 928 969 } 929 970 spin_unlock_irqrestore(&part->act_lock, irq_flags); ··· 941 982 942 983 /* set a timelimit on the disengage request */ 943 984 part->disengage_request_timeout = jiffies + 944 - (xpc_disengage_request_timelimit * HZ); 985 + (xpc_disengage_request_timelimit * HZ); 945 986 part->disengage_request_timer.expires = 946 - part->disengage_request_timeout; 987 + part->disengage_request_timeout; 947 988 add_timer(&part->disengage_request_timer); 948 989 } 949 990 ··· 953 994 xpc_partition_going_down(part, reason); 954 995 } 955 996 956 - 957 997 /* 958 998 * Mark specified partition as inactive. 959 999 */ ··· 960 1002 xpc_mark_partition_inactive(struct xpc_partition *part) 961 1003 { 962 1004 unsigned long irq_flags; 963 - 964 1005 965 1006 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", 966 1007 XPC_PARTID(part)); ··· 969 1012 spin_unlock_irqrestore(&part->act_lock, irq_flags); 970 1013 part->remote_rp_pa = 0; 971 1014 } 972 - 973 1015 974 1016 /* 975 1017 * SAL has provided a partition and machine mask. 
The partition mask ··· 997 1041 u64 *discovered_nasids; 998 1042 enum xpc_retval ret; 999 1043 1000 - 1001 1044 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + 1002 - xp_nasid_mask_bytes, 1003 - GFP_KERNEL, &remote_rp_base); 1045 + xp_nasid_mask_bytes, 1046 + GFP_KERNEL, &remote_rp_base); 1004 1047 if (remote_rp == NULL) { 1005 1048 return; 1006 1049 } 1007 - remote_vars = (struct xpc_vars *) remote_rp; 1008 - 1050 + remote_vars = (struct xpc_vars *)remote_rp; 1009 1051 1010 1052 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, 1011 - GFP_KERNEL); 1053 + GFP_KERNEL); 1012 1054 if (discovered_nasids == NULL) { 1013 1055 kfree(remote_rp_base); 1014 1056 return; 1015 1057 } 1016 1058 1017 - rp = (struct xpc_rsvd_page *) xpc_rsvd_page; 1059 + rp = (struct xpc_rsvd_page *)xpc_rsvd_page; 1018 1060 1019 1061 /* 1020 1062 * The term 'region' in this context refers to the minimum number of ··· 1035 1081 1036 1082 for (region = 0; region < max_regions; region++) { 1037 1083 1038 - if ((volatile int) xpc_exiting) { 1084 + if ((volatile int)xpc_exiting) { 1039 1085 break; 1040 1086 } 1041 1087 1042 1088 dev_dbg(xpc_part, "searching region %d\n", region); 1043 1089 1044 1090 for (nasid = (region * region_size * 2); 1045 - nasid < ((region + 1) * region_size * 2); 1046 - nasid += 2) { 1091 + nasid < ((region + 1) * region_size * 2); nasid += 2) { 1047 1092 1048 - if ((volatile int) xpc_exiting) { 1093 + if ((volatile int)xpc_exiting) { 1049 1094 break; 1050 1095 } 1051 1096 1052 1097 dev_dbg(xpc_part, "checking nasid %d\n", nasid); 1053 - 1054 1098 1055 1099 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { 1056 1100 dev_dbg(xpc_part, "PROM indicates Nasid %d is " ··· 1071 1119 continue; 1072 1120 } 1073 1121 1074 - 1075 1122 /* pull over the reserved page structure */ 1076 1123 1077 1124 ret = xpc_get_remote_rp(nasid, discovered_nasids, 1078 - remote_rp, &remote_rp_pa); 1125 + remote_rp, &remote_rp_pa); 1079 1126 if (ret != xpcSuccess) { 1080 1127 
dev_dbg(xpc_part, "unable to get reserved page " 1081 1128 "from nasid %d, reason=%d\n", nasid, ··· 1090 1139 1091 1140 partid = remote_rp->partid; 1092 1141 part = &xpc_partitions[partid]; 1093 - 1094 1142 1095 1143 /* pull over the cross partition variables */ 1096 1144 ··· 1121 1171 * get the same page for remote_act_amos_pa after 1122 1172 * module reloads and system reboots. 1123 1173 */ 1124 - if (sn_register_xp_addr_region( 1125 - remote_vars->amos_page_pa, 1126 - PAGE_SIZE, 1) < 0) { 1127 - dev_dbg(xpc_part, "partition %d failed to " 1174 + if (sn_register_xp_addr_region 1175 + (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) { 1176 + dev_dbg(xpc_part, 1177 + "partition %d failed to " 1128 1178 "register xp_addr region 0x%016lx\n", 1129 1179 partid, remote_vars->amos_page_pa); 1130 1180 1131 1181 XPC_SET_REASON(part, xpcPhysAddrRegFailed, 1132 - __LINE__); 1182 + __LINE__); 1133 1183 break; 1134 1184 } 1135 1185 ··· 1145 1195 remote_vars->act_phys_cpuid); 1146 1196 1147 1197 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 1148 - version)) { 1198 + version)) { 1149 1199 part->remote_amos_page_pa = 1150 - remote_vars->amos_page_pa; 1200 + remote_vars->amos_page_pa; 1151 1201 xpc_mark_partition_disengaged(part); 1152 1202 xpc_cancel_partition_disengage_request(part); 1153 1203 } ··· 1158 1208 kfree(discovered_nasids); 1159 1209 kfree(remote_rp_base); 1160 1210 } 1161 - 1162 1211 1163 1212 /* 1164 1213 * Given a partid, get the nasids owned by that partition from the ··· 1170 1221 u64 part_nasid_pa; 1171 1222 int bte_res; 1172 1223 1173 - 1174 1224 part = &xpc_partitions[partid]; 1175 1225 if (part->remote_rp_pa == 0) { 1176 1226 return xpcPartitionDown; ··· 1177 1229 1178 1230 memset(nasid_mask, 0, XP_NASID_MASK_BYTES); 1179 1231 1180 - part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); 1232 + part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); 1181 1233 1182 - bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, 1183 - 
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 1234 + bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask, 1235 + xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), 1236 + NULL); 1184 1237 1185 1238 return xpc_map_bte_errors(bte_res); 1186 1239 } 1187 -