Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers/misc/sgi-xp: clean up return values

Make XP return values more generic to XP and not so tied to XPC by changing
enum xpc_retval to xp_retval, along with changing return value prefixes from
xpc to xp. Also, clean up a comment block that referenced some of these return
values as well as the handling of BTE related return values.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Dean Nelson and committed by
Linus Torvalds
65c17b80 0cf942d7

+291 -401
+106 -185
drivers/misc/sgi-xp/xp.h
··· 157 157 /* 158 158 * Define the return values and values passed to user's callout functions. 159 159 * (It is important to add new value codes at the end just preceding 160 - * xpcUnknownReason, which must have the highest numerical value.) 160 + * xpUnknownReason, which must have the highest numerical value.) 161 161 */ 162 - enum xpc_retval { 163 - xpcSuccess = 0, 162 + enum xp_retval { 163 + xpSuccess = 0, 164 164 165 - xpcNotConnected, /* 1: channel is not connected */ 166 - xpcConnected, /* 2: channel connected (opened) */ 167 - xpcRETIRED1, /* 3: (formerly xpcDisconnected) */ 165 + xpNotConnected, /* 1: channel is not connected */ 166 + xpConnected, /* 2: channel connected (opened) */ 167 + xpRETIRED1, /* 3: (formerly xpDisconnected) */ 168 168 169 - xpcMsgReceived, /* 4: message received */ 170 - xpcMsgDelivered, /* 5: message delivered and acknowledged */ 169 + xpMsgReceived, /* 4: message received */ 170 + xpMsgDelivered, /* 5: message delivered and acknowledged */ 171 171 172 - xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */ 172 + xpRETIRED2, /* 6: (formerly xpTransferFailed) */ 173 173 174 - xpcNoWait, /* 7: operation would require wait */ 175 - xpcRetry, /* 8: retry operation */ 176 - xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ 177 - xpcInterrupted, /* 10: interrupted wait */ 174 + xpNoWait, /* 7: operation would require wait */ 175 + xpRetry, /* 8: retry operation */ 176 + xpTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ 177 + xpInterrupted, /* 10: interrupted wait */ 178 178 179 - xpcUnequalMsgSizes, /* 11: message size disparity between sides */ 180 - xpcInvalidAddress, /* 12: invalid address */ 179 + xpUnequalMsgSizes, /* 11: message size disparity between sides */ 180 + xpInvalidAddress, /* 12: invalid address */ 181 181 182 - xpcNoMemory, /* 13: no memory available for XPC structures */ 183 - xpcLackOfResources, /* 14: insufficient resources for operation */ 184 - xpcUnregistered, /* 15: channel is not registered */ 185 - 
xpcAlreadyRegistered, /* 16: channel is already registered */ 182 + xpNoMemory, /* 13: no memory available for XPC structures */ 183 + xpLackOfResources, /* 14: insufficient resources for operation */ 184 + xpUnregistered, /* 15: channel is not registered */ 185 + xpAlreadyRegistered, /* 16: channel is already registered */ 186 186 187 - xpcPartitionDown, /* 17: remote partition is down */ 188 - xpcNotLoaded, /* 18: XPC module is not loaded */ 189 - xpcUnloading, /* 19: this side is unloading XPC module */ 187 + xpPartitionDown, /* 17: remote partition is down */ 188 + xpNotLoaded, /* 18: XPC module is not loaded */ 189 + xpUnloading, /* 19: this side is unloading XPC module */ 190 190 191 - xpcBadMagic, /* 20: XPC MAGIC string not found */ 191 + xpBadMagic, /* 20: XPC MAGIC string not found */ 192 192 193 - xpcReactivating, /* 21: remote partition was reactivated */ 193 + xpReactivating, /* 21: remote partition was reactivated */ 194 194 195 - xpcUnregistering, /* 22: this side is unregistering channel */ 196 - xpcOtherUnregistering, /* 23: other side is unregistering channel */ 195 + xpUnregistering, /* 22: this side is unregistering channel */ 196 + xpOtherUnregistering, /* 23: other side is unregistering channel */ 197 197 198 - xpcCloneKThread, /* 24: cloning kernel thread */ 199 - xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */ 198 + xpCloneKThread, /* 24: cloning kernel thread */ 199 + xpCloneKThreadFailed, /* 25: cloning kernel thread failed */ 200 200 201 - xpcNoHeartbeat, /* 26: remote partition has no heartbeat */ 201 + xpNoHeartbeat, /* 26: remote partition has no heartbeat */ 202 202 203 - xpcPioReadError, /* 27: PIO read error */ 204 - xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */ 203 + xpPioReadError, /* 27: PIO read error */ 204 + xpPhysAddrRegFailed, /* 28: registration of phys addr range failed */ 205 205 206 - xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */ 207 - xpcBtePoisonError, /* 30: maps to 
BTEFAIL_POISON */ 208 - xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */ 209 - xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */ 210 - xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */ 211 - xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */ 212 - xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */ 213 - xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */ 214 - xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */ 215 - xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */ 206 + xpRETIRED3, /* 29: (formerly xpBteDirectoryError) */ 207 + xpRETIRED4, /* 30: (formerly xpBtePoisonError) */ 208 + xpRETIRED5, /* 31: (formerly xpBteWriteError) */ 209 + xpRETIRED6, /* 32: (formerly xpBteAccessError) */ 210 + xpRETIRED7, /* 33: (formerly xpBtePWriteError) */ 211 + xpRETIRED8, /* 34: (formerly xpBtePReadError) */ 212 + xpRETIRED9, /* 35: (formerly xpBteTimeOutError) */ 213 + xpRETIRED10, /* 36: (formerly xpBteXtalkError) */ 214 + xpRETIRED11, /* 37: (formerly xpBteNotAvailable) */ 215 + xpRETIRED12, /* 38: (formerly xpBteUnmappedError) */ 216 216 217 - xpcBadVersion, /* 39: bad version number */ 218 - xpcVarsNotSet, /* 40: the XPC variables are not set up */ 219 - xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ 220 - xpcInvalidPartid, /* 42: invalid partition ID */ 221 - xpcLocalPartid, /* 43: local partition ID */ 217 + xpBadVersion, /* 39: bad version number */ 218 + xpVarsNotSet, /* 40: the XPC variables are not set up */ 219 + xpNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ 220 + xpInvalidPartid, /* 42: invalid partition ID */ 221 + xpLocalPartid, /* 43: local partition ID */ 222 222 223 - xpcOtherGoingDown, /* 44: other side going down, reason unknown */ 224 - xpcSystemGoingDown, /* 45: system is going down, reason unknown */ 225 - xpcSystemHalt, /* 46: system is being halted */ 226 - xpcSystemReboot, /* 47: system is being rebooted */ 227 - xpcSystemPoweroff, /* 48: system is being powered off */ 223 + xpOtherGoingDown, /* 44: other 
side going down, reason unknown */ 224 + xpSystemGoingDown, /* 45: system is going down, reason unknown */ 225 + xpSystemHalt, /* 46: system is being halted */ 226 + xpSystemReboot, /* 47: system is being rebooted */ 227 + xpSystemPoweroff, /* 48: system is being powered off */ 228 228 229 - xpcDisconnecting, /* 49: channel disconnecting (closing) */ 229 + xpDisconnecting, /* 49: channel disconnecting (closing) */ 230 230 231 - xpcOpenCloseError, /* 50: channel open/close protocol error */ 231 + xpOpenCloseError, /* 50: channel open/close protocol error */ 232 232 233 - xpcDisconnected, /* 51: channel disconnected (closed) */ 233 + xpDisconnected, /* 51: channel disconnected (closed) */ 234 234 235 - xpcBteSh2Start, /* 52: BTE CRB timeout */ 235 + xpBteCopyError, /* 52: bte_copy() returned error */ 236 236 237 - /* 53: 0x1 BTE Error Response Short */ 238 - xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT, 239 - 240 - /* 54: 0x2 BTE Error Response Long */ 241 - xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG, 242 - 243 - /* 56: 0x4 BTE Error Response DSB */ 244 - xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP, 245 - 246 - /* 60: 0x8 BTE Error Response Access */ 247 - xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS, 248 - 249 - /* 68: 0x10 BTE Error CRB timeout */ 250 - xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO, 251 - 252 - /* 84: 0x20 BTE Error NACK limit */ 253 - xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT, 254 - 255 - /* 115: BTE end */ 256 - xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL, 257 - 258 - xpcUnknownReason /* 116: unknown reason - must be last in enum */ 237 + xpUnknownReason /* 53: unknown reason - must be last in enum */ 259 238 }; 260 239 261 240 /* 262 - * Define the callout function types used by XPC to update the user on 263 - * connection activity and state changes (via the user function registered by 264 - * xpc_connect()) and to notify them of messages received and 
delivered (via 265 - * the user function registered by xpc_send_notify()). 266 - * 267 - * The two function types are xpc_channel_func and xpc_notify_func and 268 - * both share the following arguments, with the exception of "data", which 269 - * only xpc_channel_func has. 241 + * Define the callout function type used by XPC to update the user on 242 + * connection activity and state changes via the user function registered 243 + * by xpc_connect(). 270 244 * 271 245 * Arguments: 272 246 * 273 - * reason - reason code. (See following table.) 247 + * reason - reason code. 274 248 * partid - partition ID associated with condition. 275 249 * ch_number - channel # associated with condition. 276 - * data - pointer to optional data. (See following table.) 250 + * data - pointer to optional data. 277 251 * key - pointer to optional user-defined value provided as the "key" 278 - * argument to xpc_connect() or xpc_send_notify(). 252 + * argument to xpc_connect(). 279 253 * 280 - * In the following table the "Optional Data" column applies to callouts made 281 - * to functions registered by xpc_connect(). A "NA" in that column indicates 282 - * that this reason code can be passed to functions registered by 283 - * xpc_send_notify() (i.e. they don't have data arguments). 254 + * A reason code of xpConnected indicates that a connection has been 255 + * established to the specified partition on the specified channel. The data 256 + * argument indicates the max number of entries allowed in the message queue. 284 257 * 285 - * Also, the first three reason codes in the following table indicate 286 - * success, whereas the others indicate failure. When a failure reason code 287 - * is received, one can assume that the channel is not connected. 258 + * A reason code of xpMsgReceived indicates that a XPC message arrived from 259 + * the specified partition on the specified channel. The data argument 260 + * specifies the address of the message's payload. 
The user must call 261 + * xpc_received() when finished with the payload. 288 262 * 289 - * 290 - * Reason Code | Cause | Optional Data 291 - * =====================+================================+===================== 292 - * xpcConnected | connection has been established| max #of entries 293 - * | to the specified partition on | allowed in message 294 - * | the specified channel | queue 295 - * ---------------------+--------------------------------+--------------------- 296 - * xpcMsgReceived | an XPC message arrived from | address of payload 297 - * | the specified partition on the | 298 - * | specified channel | [the user must call 299 - * | | xpc_received() when 300 - * | | finished with the 301 - * | | payload] 302 - * ---------------------+--------------------------------+--------------------- 303 - * xpcMsgDelivered | notification that the message | NA 304 - * | was delivered to the intended | 305 - * | recipient and that they have | 306 - * | acknowledged its receipt by | 307 - * | calling xpc_received() | 308 - * =====================+================================+===================== 309 - * xpcUnequalMsgSizes | can't connect to the specified | NULL 310 - * | partition on the specified | 311 - * | channel because of mismatched | 312 - * | message sizes | 313 - * ---------------------+--------------------------------+--------------------- 314 - * xpcNoMemory | insufficient memory avaiable | NULL 315 - * | to allocate message queue | 316 - * ---------------------+--------------------------------+--------------------- 317 - * xpcLackOfResources | lack of resources to create | NULL 318 - * | the necessary kthreads to | 319 - * | support the channel | 320 - * ---------------------+--------------------------------+--------------------- 321 - * xpcUnregistering | this side's user has | NULL or NA 322 - * | unregistered by calling | 323 - * | xpc_disconnect() | 324 - * ---------------------+--------------------------------+--------------------- 325 - * 
xpcOtherUnregistering| the other side's user has | NULL or NA 326 - * | unregistered by calling | 327 - * | xpc_disconnect() | 328 - * ---------------------+--------------------------------+--------------------- 329 - * xpcNoHeartbeat | the other side's XPC is no | NULL or NA 330 - * | longer heartbeating | 331 - * | | 332 - * ---------------------+--------------------------------+--------------------- 333 - * xpcUnloading | this side's XPC module is | NULL or NA 334 - * | being unloaded | 335 - * | | 336 - * ---------------------+--------------------------------+--------------------- 337 - * xpcOtherUnloading | the other side's XPC module is | NULL or NA 338 - * | is being unloaded | 339 - * | | 340 - * ---------------------+--------------------------------+--------------------- 341 - * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA 342 - * | error while sending an IPI | 343 - * | | 344 - * ---------------------+--------------------------------+--------------------- 345 - * xpcInvalidAddress | the address either received or | NULL or NA 346 - * | sent by the specified partition| 347 - * | is invalid | 348 - * ---------------------+--------------------------------+--------------------- 349 - * xpcBteNotAvailable | attempt to pull data from the | NULL or NA 350 - * xpcBtePoisonError | specified partition over the | 351 - * xpcBteWriteError | specified channel via a | 352 - * xpcBteAccessError | bte_copy() failed | 353 - * xpcBteTimeOutError | | 354 - * xpcBteXtalkError | | 355 - * xpcBteDirectoryError | | 356 - * xpcBteGenericError | | 357 - * xpcBteUnmappedError | | 358 - * ---------------------+--------------------------------+--------------------- 359 - * xpcUnknownReason | the specified channel to the | NULL or NA 360 - * | specified partition was | 361 - * | unavailable for unknown reasons| 362 - * =====================+================================+===================== 263 + * All other reason codes indicate failure. 
The data argmument is NULL. 264 + * When a failure reason code is received, one can assume that the channel 265 + * is not connected. 363 266 */ 364 - 365 - typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid, 267 + typedef void (*xpc_channel_func) (enum xp_retval reason, partid_t partid, 366 268 int ch_number, void *data, void *key); 367 269 368 - typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid, 270 + /* 271 + * Define the callout function type used by XPC to notify the user of 272 + * messages received and delivered via the user function registered by 273 + * xpc_send_notify(). 274 + * 275 + * Arguments: 276 + * 277 + * reason - reason code. 278 + * partid - partition ID associated with condition. 279 + * ch_number - channel # associated with condition. 280 + * key - pointer to optional user-defined value provided as the "key" 281 + * argument to xpc_send_notify(). 282 + * 283 + * A reason code of xpMsgDelivered indicates that the message was delivered 284 + * to the intended recipient and that they have acknowledged its receipt by 285 + * calling xpc_received(). 286 + * 287 + * All other reason codes indicate failure. 
288 + */ 289 + typedef void (*xpc_notify_func) (enum xp_retval reason, partid_t partid, 369 290 int ch_number, void *key); 370 291 371 292 /* ··· 322 401 struct xpc_interface { 323 402 void (*connect) (int); 324 403 void (*disconnect) (int); 325 - enum xpc_retval (*allocate) (partid_t, int, u32, void **); 326 - enum xpc_retval (*send) (partid_t, int, void *); 327 - enum xpc_retval (*send_notify) (partid_t, int, void *, 404 + enum xp_retval (*allocate) (partid_t, int, u32, void **); 405 + enum xp_retval (*send) (partid_t, int, void *); 406 + enum xp_retval (*send_notify) (partid_t, int, void *, 328 407 xpc_notify_func, void *); 329 408 void (*received) (partid_t, int, void *); 330 - enum xpc_retval (*partid_to_nasids) (partid_t, void *); 409 + enum xp_retval (*partid_to_nasids) (partid_t, void *); 331 410 }; 332 411 333 412 extern struct xpc_interface xpc_interface; 334 413 335 414 extern void xpc_set_interface(void (*)(int), 336 415 void (*)(int), 337 - enum xpc_retval (*)(partid_t, int, u32, void **), 338 - enum xpc_retval (*)(partid_t, int, void *), 339 - enum xpc_retval (*)(partid_t, int, void *, 416 + enum xp_retval (*)(partid_t, int, u32, void **), 417 + enum xp_retval (*)(partid_t, int, void *), 418 + enum xp_retval (*)(partid_t, int, void *, 340 419 xpc_notify_func, void *), 341 420 void (*)(partid_t, int, void *), 342 - enum xpc_retval (*)(partid_t, void *)); 421 + enum xp_retval (*)(partid_t, void *)); 343 422 extern void xpc_clear_interface(void); 344 423 345 - extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, 424 + extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16, 346 425 u16, u32, u32); 347 426 extern void xpc_disconnect(int); 348 427 349 - static inline enum xpc_retval 428 + static inline enum xp_retval 350 429 xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) 351 430 { 352 431 return xpc_interface.allocate(partid, ch_number, flags, payload); 353 432 } 354 433 355 - static inline enum 
xpc_retval 434 + static inline enum xp_retval 356 435 xpc_send(partid_t partid, int ch_number, void *payload) 357 436 { 358 437 return xpc_interface.send(partid, ch_number, payload); 359 438 } 360 439 361 - static inline enum xpc_retval 440 + static inline enum xp_retval 362 441 xpc_send_notify(partid_t partid, int ch_number, void *payload, 363 442 xpc_notify_func func, void *key) 364 443 { ··· 371 450 return xpc_interface.received(partid, ch_number, payload); 372 451 } 373 452 374 - static inline enum xpc_retval 453 + static inline enum xp_retval 375 454 xpc_partid_to_nasids(partid_t partid, void *nasids) 376 455 { 377 456 return xpc_interface.partid_to_nasids(partid, nasids);
+19 -19
drivers/misc/sgi-xp/xp_main.c
··· 42 42 /* 43 43 * Initialize the XPC interface to indicate that XPC isn't loaded. 44 44 */ 45 - static enum xpc_retval 45 + static enum xp_retval 46 46 xpc_notloaded(void) 47 47 { 48 - return xpcNotLoaded; 48 + return xpNotLoaded; 49 49 } 50 50 51 51 struct xpc_interface xpc_interface = { 52 52 (void (*)(int))xpc_notloaded, 53 53 (void (*)(int))xpc_notloaded, 54 - (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded, 55 - (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded, 56 - (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) 54 + (enum xp_retval(*)(partid_t, int, u32, void **))xpc_notloaded, 55 + (enum xp_retval(*)(partid_t, int, void *))xpc_notloaded, 56 + (enum xp_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) 57 57 xpc_notloaded, 58 58 (void (*)(partid_t, int, void *))xpc_notloaded, 59 - (enum xpc_retval(*)(partid_t, void *))xpc_notloaded 59 + (enum xp_retval(*)(partid_t, void *))xpc_notloaded 60 60 }; 61 61 EXPORT_SYMBOL_GPL(xpc_interface); 62 62 ··· 66 66 void 67 67 xpc_set_interface(void (*connect) (int), 68 68 void (*disconnect) (int), 69 - enum xpc_retval (*allocate) (partid_t, int, u32, void **), 70 - enum xpc_retval (*send) (partid_t, int, void *), 71 - enum xpc_retval (*send_notify) (partid_t, int, void *, 69 + enum xp_retval (*allocate) (partid_t, int, u32, void **), 70 + enum xp_retval (*send) (partid_t, int, void *), 71 + enum xp_retval (*send_notify) (partid_t, int, void *, 72 72 xpc_notify_func, void *), 73 73 void (*received) (partid_t, int, void *), 74 - enum xpc_retval (*partid_to_nasids) (partid_t, void *)) 74 + enum xp_retval (*partid_to_nasids) (partid_t, void *)) 75 75 { 76 76 xpc_interface.connect = connect; 77 77 xpc_interface.disconnect = disconnect; ··· 91 91 { 92 92 xpc_interface.connect = (void (*)(int))xpc_notloaded; 93 93 xpc_interface.disconnect = (void (*)(int))xpc_notloaded; 94 - xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32, 94 + xpc_interface.allocate = (enum 
xp_retval(*)(partid_t, int, u32, 95 95 void **))xpc_notloaded; 96 - xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *)) 96 + xpc_interface.send = (enum xp_retval(*)(partid_t, int, void *)) 97 97 xpc_notloaded; 98 - xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *, 98 + xpc_interface.send_notify = (enum xp_retval(*)(partid_t, int, void *, 99 99 xpc_notify_func, 100 100 void *))xpc_notloaded; 101 101 xpc_interface.received = (void (*)(partid_t, int, void *)) 102 102 xpc_notloaded; 103 - xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *)) 103 + xpc_interface.partid_to_nasids = (enum xp_retval(*)(partid_t, void *)) 104 104 xpc_notloaded; 105 105 } 106 106 EXPORT_SYMBOL_GPL(xpc_clear_interface); ··· 123 123 * nentries - max #of XPC message entries a message queue can contain. 124 124 * The actual number, which is determined when a connection 125 125 * is established and may be less then requested, will be 126 - * passed to the user via the xpcConnected callout. 126 + * passed to the user via the xpConnected callout. 127 127 * assigned_limit - max number of kthreads allowed to be processing 128 128 * messages (per connection) at any given instant. 129 129 * idle_limit - max number of kthreads allowed to be idle at any given 130 130 * instant. 
131 131 */ 132 - enum xpc_retval 132 + enum xp_retval 133 133 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, 134 134 u16 nentries, u32 assigned_limit, u32 idle_limit) 135 135 { ··· 143 143 registration = &xpc_registrations[ch_number]; 144 144 145 145 if (mutex_lock_interruptible(&registration->mutex) != 0) 146 - return xpcInterrupted; 146 + return xpInterrupted; 147 147 148 148 /* if XPC_CHANNEL_REGISTERED(ch_number) */ 149 149 if (registration->func != NULL) { 150 150 mutex_unlock(&registration->mutex); 151 - return xpcAlreadyRegistered; 151 + return xpAlreadyRegistered; 152 152 } 153 153 154 154 /* register the channel for connection */ ··· 163 163 164 164 xpc_interface.connect(ch_number); 165 165 166 - return xpcSuccess; 166 + return xpSuccess; 167 167 } 168 168 EXPORT_SYMBOL_GPL(xpc_connect); 169 169
+20 -51
drivers/misc/sgi-xp/xpc.h
··· 412 412 spinlock_t lock; /* lock for updating this structure */ 413 413 u32 flags; /* general flags */ 414 414 415 - enum xpc_retval reason; /* reason why channel is disconnect'g */ 415 + enum xp_retval reason; /* reason why channel is disconnect'g */ 416 416 int reason_line; /* line# disconnect initiated from */ 417 417 418 418 u16 number; /* channel # */ ··· 522 522 spinlock_t act_lock; /* protect updating of act_state */ 523 523 u8 act_state; /* from XPC HB viewpoint */ 524 524 u8 remote_vars_version; /* version# of partition's vars */ 525 - enum xpc_retval reason; /* reason partition is deactivating */ 525 + enum xp_retval reason; /* reason partition is deactivating */ 526 526 int reason_line; /* line# deactivation initiated from */ 527 527 int reactivate_nasid; /* nasid in partition to reactivate */ 528 528 ··· 646 646 extern void xpc_restrict_IPI_ops(void); 647 647 extern int xpc_identify_act_IRQ_sender(void); 648 648 extern int xpc_partition_disengaged(struct xpc_partition *); 649 - extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); 649 + extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); 650 650 extern void xpc_mark_partition_inactive(struct xpc_partition *); 651 651 extern void xpc_discovery(void); 652 652 extern void xpc_check_remote_hb(void); 653 653 extern void xpc_deactivate_partition(const int, struct xpc_partition *, 654 - enum xpc_retval); 655 - extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); 654 + enum xp_retval); 655 + extern enum xp_retval xpc_initiate_partid_to_nasids(partid_t, void *); 656 656 657 657 /* found in xpc_channel.c */ 658 658 extern void xpc_initiate_connect(int); 659 659 extern void xpc_initiate_disconnect(int); 660 - extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); 661 - extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); 662 - extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, 663 - xpc_notify_func, 
void *); 660 + extern enum xp_retval xpc_initiate_allocate(partid_t, int, u32, void **); 661 + extern enum xp_retval xpc_initiate_send(partid_t, int, void *); 662 + extern enum xp_retval xpc_initiate_send_notify(partid_t, int, void *, 663 + xpc_notify_func, void *); 664 664 extern void xpc_initiate_received(partid_t, int, void *); 665 - extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); 666 - extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); 665 + extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *); 666 + extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *); 667 667 extern void xpc_process_channel_activity(struct xpc_partition *); 668 668 extern void xpc_connected_callout(struct xpc_channel *); 669 669 extern void xpc_deliver_msg(struct xpc_channel *); 670 670 extern void xpc_disconnect_channel(const int, struct xpc_channel *, 671 - enum xpc_retval, unsigned long *); 672 - extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); 673 - extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); 671 + enum xp_retval, unsigned long *); 672 + extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); 673 + extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); 674 674 extern void xpc_teardown_infrastructure(struct xpc_partition *); 675 675 676 676 static inline void ··· 901 901 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); 902 902 } 903 903 904 - static inline enum xpc_retval 904 + static inline enum xp_retval 905 905 xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) 906 906 { 907 907 int ret = 0; ··· 923 923 924 924 local_irq_restore(irq_flags); 925 925 926 - return ((ret == 0) ? xpcSuccess : xpcPioReadError); 926 + return ((ret == 0) ? 
xpSuccess : xpPioReadError); 927 927 } 928 928 929 929 /* ··· 992 992 unsigned long *irq_flags) 993 993 { 994 994 struct xpc_partition *part = &xpc_partitions[ch->partid]; 995 - enum xpc_retval ret; 995 + enum xp_retval ret; 996 996 997 997 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 998 998 ret = xpc_IPI_send(part->remote_IPI_amo_va, ··· 1001 1001 part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); 1002 1002 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 1003 1003 ipi_flag_string, ch->partid, ch->number, ret); 1004 - if (unlikely(ret != xpcSuccess)) { 1004 + if (unlikely(ret != xpSuccess)) { 1005 1005 if (irq_flags != NULL) 1006 1006 spin_unlock_irqrestore(&ch->lock, *irq_flags); 1007 1007 XPC_DEACTIVATE_PARTITION(part, ret); ··· 1123 1123 return amo; 1124 1124 } 1125 1125 1126 - static inline enum xpc_retval 1126 + static inline enum xp_retval 1127 1127 xpc_map_bte_errors(bte_result_t error) 1128 1128 { 1129 - if (error == BTE_SUCCESS) 1130 - return xpcSuccess; 1131 - 1132 - if (is_shub2()) { 1133 - if (BTE_VALID_SH2_ERROR(error)) 1134 - return xpcBteSh2Start + error; 1135 - return xpcBteUnmappedError; 1136 - } 1137 - switch (error) { 1138 - case BTE_SUCCESS: 1139 - return xpcSuccess; 1140 - case BTEFAIL_DIR: 1141 - return xpcBteDirectoryError; 1142 - case BTEFAIL_POISON: 1143 - return xpcBtePoisonError; 1144 - case BTEFAIL_WERR: 1145 - return xpcBteWriteError; 1146 - case BTEFAIL_ACCESS: 1147 - return xpcBteAccessError; 1148 - case BTEFAIL_PWERR: 1149 - return xpcBtePWriteError; 1150 - case BTEFAIL_PRERR: 1151 - return xpcBtePReadError; 1152 - case BTEFAIL_TOUT: 1153 - return xpcBteTimeOutError; 1154 - case BTEFAIL_XTERR: 1155 - return xpcBteXtalkError; 1156 - case BTEFAIL_NOTAVAIL: 1157 - return xpcBteNotAvailable; 1158 - default: 1159 - return xpcBteUnmappedError; 1160 - } 1129 + return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError); 1161 1130 } 1162 1131 1163 1132 /*
+83 -83
drivers/misc/sgi-xp/xpc_channel.c
··· 90 90 * Setup the infrastructure necessary to support XPartition Communication 91 91 * between the specified remote partition and the local one. 92 92 */ 93 - enum xpc_retval 93 + enum xp_retval 94 94 xpc_setup_infrastructure(struct xpc_partition *part) 95 95 { 96 96 int ret, cpuid; ··· 114 114 GFP_KERNEL); 115 115 if (part->channels == NULL) { 116 116 dev_err(xpc_chan, "can't get memory for channels\n"); 117 - return xpcNoMemory; 117 + return xpNoMemory; 118 118 } 119 119 120 120 part->nchannels = XPC_NCHANNELS; ··· 129 129 part->channels = NULL; 130 130 dev_err(xpc_chan, "can't get memory for local get/put " 131 131 "values\n"); 132 - return xpcNoMemory; 132 + return xpNoMemory; 133 133 } 134 134 135 135 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, ··· 143 143 part->local_GPs = NULL; 144 144 kfree(part->channels); 145 145 part->channels = NULL; 146 - return xpcNoMemory; 146 + return xpNoMemory; 147 147 } 148 148 149 149 /* allocate all the required open and close args */ ··· 159 159 part->local_GPs = NULL; 160 160 kfree(part->channels); 161 161 part->channels = NULL; 162 - return xpcNoMemory; 162 + return xpNoMemory; 163 163 } 164 164 165 165 part->remote_openclose_args = ··· 175 175 part->local_GPs = NULL; 176 176 kfree(part->channels); 177 177 part->channels = NULL; 178 - return xpcNoMemory; 178 + return xpNoMemory; 179 179 } 180 180 181 181 xpc_initialize_channels(part, partid); ··· 209 209 part->local_GPs = NULL; 210 210 kfree(part->channels); 211 211 part->channels = NULL; 212 - return xpcLackOfResources; 212 + return xpLackOfResources; 213 213 } 214 214 215 215 /* Setup a timer to check for dropped IPIs */ ··· 243 243 xpc_vars_part[partid].nchannels = part->nchannels; 244 244 xpc_vars_part[partid].magic = XPC_VP_MAGIC1; 245 245 246 - return xpcSuccess; 246 + return xpSuccess; 247 247 } 248 248 249 249 /* ··· 254 254 * dst must be a cacheline aligned virtual address on this partition. 
255 255 * cnt must be an cacheline sized 256 256 */ 257 - static enum xpc_retval 257 + static enum xp_retval 258 258 xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, 259 259 const void *src, size_t cnt) 260 260 { ··· 270 270 bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, 271 271 (BTE_NORMAL | BTE_WACQUIRE), NULL); 272 272 if (bte_ret == BTE_SUCCESS) 273 - return xpcSuccess; 273 + return xpSuccess; 274 274 275 275 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", 276 276 XPC_PARTID(part), bte_ret); ··· 282 282 * Pull the remote per partition specific variables from the specified 283 283 * partition. 284 284 */ 285 - enum xpc_retval 285 + enum xp_retval 286 286 xpc_pull_remote_vars_part(struct xpc_partition *part) 287 287 { 288 288 u8 buffer[L1_CACHE_BYTES * 2]; ··· 291 291 struct xpc_vars_part *pulled_entry; 292 292 u64 remote_entry_cacheline_pa, remote_entry_pa; 293 293 partid_t partid = XPC_PARTID(part); 294 - enum xpc_retval ret; 294 + enum xp_retval ret; 295 295 296 296 /* pull the cacheline that contains the variables we're interested in */ 297 297 ··· 311 311 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, 312 312 (void *)remote_entry_cacheline_pa, 313 313 L1_CACHE_BYTES); 314 - if (ret != xpcSuccess) { 314 + if (ret != xpSuccess) { 315 315 dev_dbg(xpc_chan, "failed to pull XPC vars_part from " 316 316 "partition %d, ret=%d\n", partid, ret); 317 317 return ret; ··· 326 326 dev_dbg(xpc_chan, "partition %d's XPC vars_part for " 327 327 "partition %d has bad magic value (=0x%lx)\n", 328 328 partid, sn_partition_id, pulled_entry->magic); 329 - return xpcBadMagic; 329 + return xpBadMagic; 330 330 } 331 331 332 332 /* they've not been initialized yet */ 333 - return xpcRetry; 333 + return xpRetry; 334 334 } 335 335 336 336 if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { ··· 344 344 dev_err(xpc_chan, "partition %d's XPC vars_part for " 345 345 "partition %d are not valid\n", partid, 346 346 
sn_partition_id); 347 - return xpcInvalidAddress; 347 + return xpInvalidAddress; 348 348 } 349 349 350 350 /* the variables we imported look to be valid */ ··· 366 366 } 367 367 368 368 if (pulled_entry->magic == XPC_VP_MAGIC1) 369 - return xpcRetry; 369 + return xpRetry; 370 370 371 - return xpcSuccess; 371 + return xpSuccess; 372 372 } 373 373 374 374 /* ··· 379 379 { 380 380 unsigned long irq_flags; 381 381 u64 IPI_amo; 382 - enum xpc_retval ret; 382 + enum xp_retval ret; 383 383 384 384 /* 385 385 * See if there are any IPI flags to be handled. ··· 398 398 (void *)part-> 399 399 remote_openclose_args_pa, 400 400 XPC_OPENCLOSE_ARGS_SIZE); 401 - if (ret != xpcSuccess) { 401 + if (ret != xpSuccess) { 402 402 XPC_DEACTIVATE_PARTITION(part, ret); 403 403 404 404 dev_dbg(xpc_chan, "failed to pull openclose args from " ··· 414 414 ret = xpc_pull_remote_cachelines(part, part->remote_GPs, 415 415 (void *)part->remote_GPs_pa, 416 416 XPC_GP_SIZE); 417 - if (ret != xpcSuccess) { 417 + if (ret != xpSuccess) { 418 418 XPC_DEACTIVATE_PARTITION(part, ret); 419 419 420 420 dev_dbg(xpc_chan, "failed to pull GPs from partition " ··· 431 431 /* 432 432 * Allocate the local message queue and the notify queue. 433 433 */ 434 - static enum xpc_retval 434 + static enum xp_retval 435 435 xpc_allocate_local_msgqueue(struct xpc_channel *ch) 436 436 { 437 437 unsigned long irq_flags; ··· 464 464 ch->local_nentries = nentries; 465 465 } 466 466 spin_unlock_irqrestore(&ch->lock, irq_flags); 467 - return xpcSuccess; 467 + return xpSuccess; 468 468 } 469 469 470 470 dev_dbg(xpc_chan, "can't get memory for local message queue and notify " 471 471 "queue, partid=%d, channel=%d\n", ch->partid, ch->number); 472 - return xpcNoMemory; 472 + return xpNoMemory; 473 473 } 474 474 475 475 /* 476 476 * Allocate the cached remote message queue. 
477 477 */ 478 - static enum xpc_retval 478 + static enum xp_retval 479 479 xpc_allocate_remote_msgqueue(struct xpc_channel *ch) 480 480 { 481 481 unsigned long irq_flags; ··· 502 502 ch->remote_nentries = nentries; 503 503 } 504 504 spin_unlock_irqrestore(&ch->lock, irq_flags); 505 - return xpcSuccess; 505 + return xpSuccess; 506 506 } 507 507 508 508 dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " 509 509 "partid=%d, channel=%d\n", ch->partid, ch->number); 510 - return xpcNoMemory; 510 + return xpNoMemory; 511 511 } 512 512 513 513 /* ··· 515 515 * 516 516 * Note: Assumes all of the channel sizes are filled in. 517 517 */ 518 - static enum xpc_retval 518 + static enum xp_retval 519 519 xpc_allocate_msgqueues(struct xpc_channel *ch) 520 520 { 521 521 unsigned long irq_flags; 522 - enum xpc_retval ret; 522 + enum xp_retval ret; 523 523 524 524 DBUG_ON(ch->flags & XPC_C_SETUP); 525 525 526 526 ret = xpc_allocate_local_msgqueue(ch); 527 - if (ret != xpcSuccess) 527 + if (ret != xpSuccess) 528 528 return ret; 529 529 530 530 ret = xpc_allocate_remote_msgqueue(ch); 531 - if (ret != xpcSuccess) { 531 + if (ret != xpSuccess) { 532 532 kfree(ch->local_msgqueue_base); 533 533 ch->local_msgqueue = NULL; 534 534 kfree(ch->notify_queue); ··· 540 540 ch->flags |= XPC_C_SETUP; 541 541 spin_unlock_irqrestore(&ch->lock, irq_flags); 542 542 543 - return xpcSuccess; 543 + return xpSuccess; 544 544 } 545 545 546 546 /* ··· 552 552 static void 553 553 xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) 554 554 { 555 - enum xpc_retval ret; 555 + enum xp_retval ret; 556 556 557 557 DBUG_ON(!spin_is_locked(&ch->lock)); 558 558 ··· 568 568 ret = xpc_allocate_msgqueues(ch); 569 569 spin_lock_irqsave(&ch->lock, *irq_flags); 570 570 571 - if (ret != xpcSuccess) 571 + if (ret != xpSuccess) 572 572 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); 573 573 574 574 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) ··· 603 603 * Notify those who wanted 
to be notified upon delivery of their message. 604 604 */ 605 605 static void 606 - xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) 606 + xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put) 607 607 { 608 608 struct xpc_notify *notify; 609 609 u8 notify_type; ··· 748 748 749 749 if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { 750 750 spin_unlock_irqrestore(&ch->lock, *irq_flags); 751 - xpc_disconnect_callout(ch, xpcDisconnected); 751 + xpc_disconnect_callout(ch, xpDisconnected); 752 752 spin_lock_irqsave(&ch->lock, *irq_flags); 753 753 } 754 754 ··· 791 791 struct xpc_openclose_args *args = 792 792 &part->remote_openclose_args[ch_number]; 793 793 struct xpc_channel *ch = &part->channels[ch_number]; 794 - enum xpc_retval reason; 794 + enum xp_retval reason; 795 795 796 796 spin_lock_irqsave(&ch->lock, irq_flags); 797 797 ··· 871 871 872 872 if (!(ch->flags & XPC_C_DISCONNECTING)) { 873 873 reason = args->reason; 874 - if (reason <= xpcSuccess || reason > xpcUnknownReason) 875 - reason = xpcUnknownReason; 876 - else if (reason == xpcUnregistering) 877 - reason = xpcOtherUnregistering; 874 + if (reason <= xpSuccess || reason > xpUnknownReason) 875 + reason = xpUnknownReason; 876 + else if (reason == xpUnregistering) 877 + reason = xpOtherUnregistering; 878 878 879 879 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); 880 880 ··· 961 961 962 962 if (ch->flags & XPC_C_OPENREQUEST) { 963 963 if (args->msg_size != ch->msg_size) { 964 - XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 964 + XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, 965 965 &irq_flags); 966 966 spin_unlock_irqrestore(&ch->lock, irq_flags); 967 967 return; ··· 991 991 return; 992 992 } 993 993 if (!(ch->flags & XPC_C_OPENREQUEST)) { 994 - XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, 994 + XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, 995 995 &irq_flags); 996 996 spin_unlock_irqrestore(&ch->lock, irq_flags); 997 997 return; ··· 1042 1042 /* 1043 1043 * 
Attempt to establish a channel connection to a remote partition. 1044 1044 */ 1045 - static enum xpc_retval 1045 + static enum xp_retval 1046 1046 xpc_connect_channel(struct xpc_channel *ch) 1047 1047 { 1048 1048 unsigned long irq_flags; 1049 1049 struct xpc_registration *registration = &xpc_registrations[ch->number]; 1050 1050 1051 1051 if (mutex_trylock(&registration->mutex) == 0) 1052 - return xpcRetry; 1052 + return xpRetry; 1053 1053 1054 1054 if (!XPC_CHANNEL_REGISTERED(ch->number)) { 1055 1055 mutex_unlock(&registration->mutex); 1056 - return xpcUnregistered; 1056 + return xpUnregistered; 1057 1057 } 1058 1058 1059 1059 spin_lock_irqsave(&ch->lock, irq_flags); ··· 1095 1095 * the channel lock as needed. 1096 1096 */ 1097 1097 mutex_unlock(&registration->mutex); 1098 - XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 1098 + XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, 1099 1099 &irq_flags); 1100 1100 spin_unlock_irqrestore(&ch->lock, irq_flags); 1101 - return xpcUnequalMsgSizes; 1101 + return xpUnequalMsgSizes; 1102 1102 } 1103 1103 } else { 1104 1104 ch->msg_size = registration->msg_size; ··· 1120 1120 1121 1121 spin_unlock_irqrestore(&ch->lock, irq_flags); 1122 1122 1123 - return xpcSuccess; 1123 + return xpSuccess; 1124 1124 } 1125 1125 1126 1126 /* ··· 1203 1203 * Notify senders that messages sent have been 1204 1204 * received and delivered by the other side. 1205 1205 */ 1206 - xpc_notify_senders(ch, xpcMsgDelivered, 1206 + xpc_notify_senders(ch, xpMsgDelivered, 1207 1207 ch->remote_GP.get); 1208 1208 } 1209 1209 ··· 1335 1335 * at the same time. 
1336 1336 */ 1337 1337 void 1338 - xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) 1338 + xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) 1339 1339 { 1340 1340 unsigned long irq_flags; 1341 1341 int ch_number; ··· 1456 1456 /* let the registerer know that a connection has been established */ 1457 1457 1458 1458 if (ch->func != NULL) { 1459 - dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " 1459 + dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " 1460 1460 "partid=%d, channel=%d\n", ch->partid, ch->number); 1461 1461 1462 - ch->func(xpcConnected, ch->partid, ch->number, 1462 + ch->func(xpConnected, ch->partid, ch->number, 1463 1463 (void *)(u64)ch->local_nentries, ch->key); 1464 1464 1465 - dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " 1465 + dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " 1466 1466 "partid=%d, channel=%d\n", ch->partid, ch->number); 1467 1467 } 1468 1468 } ··· 1503 1503 if (!(ch->flags & XPC_C_DISCONNECTED)) { 1504 1504 ch->flags |= XPC_C_WDISCONNECT; 1505 1505 1506 - XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, 1506 + XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, 1507 1507 &irq_flags); 1508 1508 } 1509 1509 ··· 1528 1528 */ 1529 1529 void 1530 1530 xpc_disconnect_channel(const int line, struct xpc_channel *ch, 1531 - enum xpc_retval reason, unsigned long *irq_flags) 1531 + enum xp_retval reason, unsigned long *irq_flags) 1532 1532 { 1533 1533 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); 1534 1534 ··· 1563 1563 1564 1564 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 1565 1565 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 1566 - /* start a kthread that will do the xpcDisconnecting callout */ 1566 + /* start a kthread that will do the xpDisconnecting callout */ 1567 1567 xpc_create_kthreads(ch, 1, 1); 1568 1568 } 1569 1569 ··· 1575 1575 } 1576 1576 1577 1577 void 1578 - xpc_disconnect_callout(struct xpc_channel *ch, enum 
xpc_retval reason) 1578 + xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) 1579 1579 { 1580 1580 /* 1581 1581 * Let the channel's registerer know that the channel is being ··· 1598 1598 * Wait for a message entry to become available for the specified channel, 1599 1599 * but don't wait any longer than 1 jiffy. 1600 1600 */ 1601 - static enum xpc_retval 1601 + static enum xp_retval 1602 1602 xpc_allocate_msg_wait(struct xpc_channel *ch) 1603 1603 { 1604 - enum xpc_retval ret; 1604 + enum xp_retval ret; 1605 1605 1606 1606 if (ch->flags & XPC_C_DISCONNECTING) { 1607 - DBUG_ON(ch->reason == xpcInterrupted); 1607 + DBUG_ON(ch->reason == xpInterrupted); 1608 1608 return ch->reason; 1609 1609 } 1610 1610 ··· 1614 1614 1615 1615 if (ch->flags & XPC_C_DISCONNECTING) { 1616 1616 ret = ch->reason; 1617 - DBUG_ON(ch->reason == xpcInterrupted); 1617 + DBUG_ON(ch->reason == xpInterrupted); 1618 1618 } else if (ret == 0) { 1619 - ret = xpcTimeout; 1619 + ret = xpTimeout; 1620 1620 } else { 1621 - ret = xpcInterrupted; 1621 + ret = xpInterrupted; 1622 1622 } 1623 1623 1624 1624 return ret; ··· 1628 1628 * Allocate an entry for a message from the message queue associated with the 1629 1629 * specified channel. 1630 1630 */ 1631 - static enum xpc_retval 1631 + static enum xp_retval 1632 1632 xpc_allocate_msg(struct xpc_channel *ch, u32 flags, 1633 1633 struct xpc_msg **address_of_msg) 1634 1634 { 1635 1635 struct xpc_msg *msg; 1636 - enum xpc_retval ret; 1636 + enum xp_retval ret; 1637 1637 s64 put; 1638 1638 1639 1639 /* this reference will be dropped in xpc_send_msg() */ ··· 1645 1645 } 1646 1646 if (!(ch->flags & XPC_C_CONNECTED)) { 1647 1647 xpc_msgqueue_deref(ch); 1648 - return xpcNotConnected; 1648 + return xpNotConnected; 1649 1649 } 1650 1650 1651 1651 /* ··· 1653 1653 * If none are available, we'll make sure that we grab the latest 1654 1654 * GP values. 
1655 1655 */ 1656 - ret = xpcTimeout; 1656 + ret = xpTimeout; 1657 1657 1658 1658 while (1) { 1659 1659 ··· 1683 1683 * that will cause the IPI handler to fetch the latest 1684 1684 * GP values as if an IPI was sent by the other side. 1685 1685 */ 1686 - if (ret == xpcTimeout) 1686 + if (ret == xpTimeout) 1687 1687 xpc_IPI_send_local_msgrequest(ch); 1688 1688 1689 1689 if (flags & XPC_NOWAIT) { 1690 1690 xpc_msgqueue_deref(ch); 1691 - return xpcNoWait; 1691 + return xpNoWait; 1692 1692 } 1693 1693 1694 1694 ret = xpc_allocate_msg_wait(ch); 1695 - if (ret != xpcInterrupted && ret != xpcTimeout) { 1695 + if (ret != xpInterrupted && ret != xpTimeout) { 1696 1696 xpc_msgqueue_deref(ch); 1697 1697 return ret; 1698 1698 } ··· 1711 1711 1712 1712 *address_of_msg = msg; 1713 1713 1714 - return xpcSuccess; 1714 + return xpSuccess; 1715 1715 } 1716 1716 1717 1717 /* ··· 1727 1727 * payload - address of the allocated payload area pointer (filled in on 1728 1728 * return) in which the user-defined message is constructed. 1729 1729 */ 1730 - enum xpc_retval 1730 + enum xp_retval 1731 1731 xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) 1732 1732 { 1733 1733 struct xpc_partition *part = &xpc_partitions[partid]; 1734 - enum xpc_retval ret = xpcUnknownReason; 1734 + enum xp_retval ret = xpUnknownReason; 1735 1735 struct xpc_msg *msg = NULL; 1736 1736 1737 1737 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); ··· 1814 1814 * local message queue's Put value and sends an IPI to the partition the 1815 1815 * message is being sent to. 
1816 1816 */ 1817 - static enum xpc_retval 1817 + static enum xp_retval 1818 1818 xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, 1819 1819 xpc_notify_func func, void *key) 1820 1820 { 1821 - enum xpc_retval ret = xpcSuccess; 1821 + enum xp_retval ret = xpSuccess; 1822 1822 struct xpc_notify *notify = notify; 1823 1823 s64 put, msg_number = msg->number; 1824 1824 ··· 1908 1908 * payload - pointer to the payload area allocated via 1909 1909 * xpc_initiate_allocate(). 1910 1910 */ 1911 - enum xpc_retval 1911 + enum xp_retval 1912 1912 xpc_initiate_send(partid_t partid, int ch_number, void *payload) 1913 1913 { 1914 1914 struct xpc_partition *part = &xpc_partitions[partid]; 1915 1915 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1916 - enum xpc_retval ret; 1916 + enum xp_retval ret; 1917 1917 1918 1918 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, 1919 1919 partid, ch_number); ··· 1957 1957 * receipt. THIS FUNCTION MUST BE NON-BLOCKING. 1958 1958 * key - user-defined key to be passed to the function when it's called. 
1959 1959 */ 1960 - enum xpc_retval 1960 + enum xp_retval 1961 1961 xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, 1962 1962 xpc_notify_func func, void *key) 1963 1963 { 1964 1964 struct xpc_partition *part = &xpc_partitions[partid]; 1965 1965 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1966 - enum xpc_retval ret; 1966 + enum xp_retval ret; 1967 1967 1968 1968 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, 1969 1969 partid, ch_number); ··· 1985 1985 struct xpc_msg *remote_msg, *msg; 1986 1986 u32 msg_index, nmsgs; 1987 1987 u64 msg_offset; 1988 - enum xpc_retval ret; 1988 + enum xp_retval ret; 1989 1989 1990 1990 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { 1991 1991 /* we were interrupted by a signal */ ··· 2012 2012 2013 2013 ret = xpc_pull_remote_cachelines(part, msg, remote_msg, 2014 2014 nmsgs * ch->msg_size); 2015 - if (ret != xpcSuccess) { 2015 + if (ret != xpSuccess) { 2016 2016 2017 2017 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 2018 2018 " msg %ld from partition %d, channel=%d, " ··· 2112 2112 ch->number); 2113 2113 2114 2114 /* deliver the message to its intended recipient */ 2115 - ch->func(xpcMsgReceived, ch->partid, ch->number, 2115 + ch->func(xpMsgReceived, ch->partid, ch->number, 2116 2116 &msg->payload, ch->key); 2117 2117 2118 2118 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
+22 -22
drivers/misc/sgi-xp/xpc_main.c
··· 315 315 * the XPC per partition variables from the remote partition and waiting for 316 316 * the remote partition to pull ours. 317 317 */ 318 - static enum xpc_retval 318 + static enum xp_retval 319 319 xpc_make_first_contact(struct xpc_partition *part) 320 320 { 321 - enum xpc_retval ret; 321 + enum xp_retval ret; 322 322 323 - while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { 324 - if (ret != xpcRetry) { 323 + while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) { 324 + if (ret != xpRetry) { 325 325 XPC_DEACTIVATE_PARTITION(part, ret); 326 326 return ret; 327 327 } ··· 406 406 407 407 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); 408 408 409 - if (xpc_setup_infrastructure(part) != xpcSuccess) 409 + if (xpc_setup_infrastructure(part) != xpSuccess) 410 410 return; 411 411 412 412 /* ··· 418 418 419 419 (void)xpc_part_ref(part); /* this will always succeed */ 420 420 421 - if (xpc_make_first_contact(part) == xpcSuccess) 421 + if (xpc_make_first_contact(part) == xpSuccess) 422 422 xpc_channel_mgr(part); 423 423 424 424 xpc_part_deref(part); ··· 470 470 471 471 spin_lock_irqsave(&part->act_lock, irq_flags); 472 472 part->act_state = XPC_P_INACTIVE; 473 - XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); 473 + XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__); 474 474 spin_unlock_irqrestore(&part->act_lock, irq_flags); 475 475 part->remote_rp_pa = 0; 476 476 return 0; ··· 488 488 xpc_disallow_hb(partid, xpc_vars); 489 489 xpc_mark_partition_inactive(part); 490 490 491 - if (part->reason == xpcReactivating) { 491 + if (part->reason == xpReactivating) { 492 492 /* interrupting ourselves results in activating partition */ 493 493 xpc_IPI_send_reactivate(part); 494 494 } ··· 508 508 DBUG_ON(part->act_state != XPC_P_INACTIVE); 509 509 510 510 part->act_state = XPC_P_ACTIVATION_REQ; 511 - XPC_SET_REASON(part, xpcCloneKThread, __LINE__); 511 + XPC_SET_REASON(part, xpCloneKThread, __LINE__); 512 512 513 513 
spin_unlock_irqrestore(&part->act_lock, irq_flags); 514 514 ··· 517 517 if (IS_ERR(kthread)) { 518 518 spin_lock_irqsave(&part->act_lock, irq_flags); 519 519 part->act_state = XPC_P_INACTIVE; 520 - XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); 520 + XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); 521 521 spin_unlock_irqrestore(&part->act_lock, irq_flags); 522 522 } 523 523 } ··· 696 696 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; 697 697 spin_unlock_irqrestore(&ch->lock, irq_flags); 698 698 699 - xpc_disconnect_callout(ch, xpcDisconnecting); 699 + xpc_disconnect_callout(ch, xpDisconnecting); 700 700 701 701 spin_lock_irqsave(&ch->lock, irq_flags); 702 702 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; ··· 776 776 * then we'll deadlock if all other kthreads assigned 777 777 * to this channel are blocked in the channel's 778 778 * registerer, because the only thing that will unblock 779 - * them is the xpcDisconnecting callout that this 779 + * them is the xpDisconnecting callout that this 780 780 * failed kthread_run() would have made. 781 781 */ 782 782 ··· 796 796 * to function. 
797 797 */ 798 798 spin_lock_irqsave(&ch->lock, irq_flags); 799 - XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 799 + XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, 800 800 &irq_flags); 801 801 spin_unlock_irqrestore(&ch->lock, irq_flags); 802 802 } ··· 857 857 } 858 858 859 859 static void 860 - xpc_do_exit(enum xpc_retval reason) 860 + xpc_do_exit(enum xp_retval reason) 861 861 { 862 862 partid_t partid; 863 863 int active_part_count, printed_waiting_msg = 0; ··· 955 955 del_timer_sync(&xpc_hb_timer); 956 956 DBUG_ON(xpc_vars->heartbeating_to_mask != 0); 957 957 958 - if (reason == xpcUnloading) { 958 + if (reason == xpUnloading) { 959 959 /* take ourselves off of the reboot_notifier_list */ 960 960 (void)unregister_reboot_notifier(&xpc_reboot_notifier); 961 961 ··· 981 981 static int 982 982 xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) 983 983 { 984 - enum xpc_retval reason; 984 + enum xp_retval reason; 985 985 986 986 switch (event) { 987 987 case SYS_RESTART: 988 - reason = xpcSystemReboot; 988 + reason = xpSystemReboot; 989 989 break; 990 990 case SYS_HALT: 991 - reason = xpcSystemHalt; 991 + reason = xpSystemHalt; 992 992 break; 993 993 case SYS_POWER_OFF: 994 - reason = xpcSystemPoweroff; 994 + reason = xpSystemPoweroff; 995 995 break; 996 996 default: 997 - reason = xpcSystemGoingDown; 997 + reason = xpSystemGoingDown; 998 998 } 999 999 1000 1000 xpc_do_exit(reason); ··· 1279 1279 /* mark this new thread as a non-starter */ 1280 1280 complete(&xpc_discovery_exited); 1281 1281 1282 - xpc_do_exit(xpcUnloading); 1282 + xpc_do_exit(xpUnloading); 1283 1283 return -EBUSY; 1284 1284 } 1285 1285 ··· 1297 1297 void __exit 1298 1298 xpc_exit(void) 1299 1299 { 1300 - xpc_do_exit(xpcUnloading); 1300 + xpc_do_exit(xpUnloading); 1301 1301 } 1302 1302 1303 1303 module_exit(xpc_exit);
+32 -32
drivers/misc/sgi-xp/xpc_partition.c
··· 444 444 (remote_vars->heartbeat_offline == 0)) || 445 445 !xpc_hb_allowed(sn_partition_id, remote_vars)) { 446 446 447 - XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); 447 + XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat); 448 448 continue; 449 449 } 450 450 ··· 459 459 * is large enough to contain a copy of their reserved page header and 460 460 * part_nasids mask. 461 461 */ 462 - static enum xpc_retval 462 + static enum xp_retval 463 463 xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 464 464 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 465 465 { ··· 469 469 470 470 *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); 471 471 if (*remote_rp_pa == 0) 472 - return xpcNoRsvdPageAddr; 472 + return xpNoRsvdPageAddr; 473 473 474 474 /* pull over the reserved page header and part_nasids mask */ 475 475 bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, ··· 489 489 490 490 if (remote_rp->partid < 1 || 491 491 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 492 - return xpcInvalidPartid; 492 + return xpInvalidPartid; 493 493 } 494 494 495 495 if (remote_rp->partid == sn_partition_id) 496 - return xpcLocalPartid; 496 + return xpLocalPartid; 497 497 498 498 if (XPC_VERSION_MAJOR(remote_rp->version) != 499 499 XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 500 - return xpcBadVersion; 500 + return xpBadVersion; 501 501 } 502 502 503 - return xpcSuccess; 503 + return xpSuccess; 504 504 } 505 505 506 506 /* ··· 509 509 * remote_vars points to a buffer that is cacheline aligned for BTE copies and 510 510 * assumed to be of size XPC_RP_VARS_SIZE. 
511 511 */ 512 - static enum xpc_retval 512 + static enum xp_retval 513 513 xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) 514 514 { 515 515 int bres; 516 516 517 517 if (remote_vars_pa == 0) 518 - return xpcVarsNotSet; 518 + return xpVarsNotSet; 519 519 520 520 /* pull over the cross partition variables */ 521 521 bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, ··· 525 525 526 526 if (XPC_VERSION_MAJOR(remote_vars->version) != 527 527 XPC_VERSION_MAJOR(XPC_V_VERSION)) { 528 - return xpcBadVersion; 528 + return xpBadVersion; 529 529 } 530 530 531 - return xpcSuccess; 531 + return xpSuccess; 532 532 } 533 533 534 534 /* ··· 606 606 struct timespec remote_rp_stamp = { 0, 0 }; 607 607 partid_t partid; 608 608 struct xpc_partition *part; 609 - enum xpc_retval ret; 609 + enum xp_retval ret; 610 610 611 611 /* pull over the reserved page structure */ 612 612 613 613 remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; 614 614 615 615 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); 616 - if (ret != xpcSuccess) { 616 + if (ret != xpSuccess) { 617 617 dev_warn(xpc_part, "unable to get reserved page from nasid %d, " 618 618 "which sent interrupt, reason=%d\n", nasid, ret); 619 619 return; ··· 632 632 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; 633 633 634 634 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 635 - if (ret != xpcSuccess) { 635 + if (ret != xpSuccess) { 636 636 637 637 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 638 638 "which sent interrupt, reason=%d\n", nasid, ret); ··· 699 699 &remote_rp_stamp, remote_rp_pa, 700 700 remote_vars_pa, remote_vars); 701 701 part->reactivate_nasid = nasid; 702 - XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 702 + XPC_DEACTIVATE_PARTITION(part, xpReactivating); 703 703 return; 704 704 } 705 705 ··· 754 754 755 755 if (reactivate) { 756 756 part->reactivate_nasid = nasid; 757 - XPC_DEACTIVATE_PARTITION(part, 
xpcReactivating); 757 + XPC_DEACTIVATE_PARTITION(part, xpReactivating); 758 758 759 759 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && 760 760 xpc_partition_disengage_requested(1UL << partid)) { 761 - XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); 761 + XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown); 762 762 } 763 763 } 764 764 ··· 870 870 /* 871 871 * Mark specified partition as active. 872 872 */ 873 - enum xpc_retval 873 + enum xp_retval 874 874 xpc_mark_partition_active(struct xpc_partition *part) 875 875 { 876 876 unsigned long irq_flags; 877 - enum xpc_retval ret; 877 + enum xp_retval ret; 878 878 879 879 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 880 880 881 881 spin_lock_irqsave(&part->act_lock, irq_flags); 882 882 if (part->act_state == XPC_P_ACTIVATING) { 883 883 part->act_state = XPC_P_ACTIVE; 884 - ret = xpcSuccess; 884 + ret = xpSuccess; 885 885 } else { 886 - DBUG_ON(part->reason == xpcSuccess); 886 + DBUG_ON(part->reason == xpSuccess); 887 887 ret = part->reason; 888 888 } 889 889 spin_unlock_irqrestore(&part->act_lock, irq_flags); ··· 896 896 */ 897 897 void 898 898 xpc_deactivate_partition(const int line, struct xpc_partition *part, 899 - enum xpc_retval reason) 899 + enum xp_retval reason) 900 900 { 901 901 unsigned long irq_flags; 902 902 ··· 905 905 if (part->act_state == XPC_P_INACTIVE) { 906 906 XPC_SET_REASON(part, reason, line); 907 907 spin_unlock_irqrestore(&part->act_lock, irq_flags); 908 - if (reason == xpcReactivating) { 908 + if (reason == xpReactivating) { 909 909 /* we interrupt ourselves to reactivate partition */ 910 910 xpc_IPI_send_reactivate(part); 911 911 } 912 912 return; 913 913 } 914 914 if (part->act_state == XPC_P_DEACTIVATING) { 915 - if ((part->reason == xpcUnloading && reason != xpcUnloading) || 916 - reason == xpcReactivating) { 915 + if ((part->reason == xpUnloading && reason != xpUnloading) || 916 + reason == xpReactivating) { 917 917 XPC_SET_REASON(part, 
reason, line); 918 918 } 919 919 spin_unlock_irqrestore(&part->act_lock, irq_flags); ··· 985 985 partid_t partid; 986 986 struct xpc_partition *part; 987 987 u64 *discovered_nasids; 988 - enum xpc_retval ret; 988 + enum xp_retval ret; 989 989 990 990 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + 991 991 xp_nasid_mask_bytes, ··· 1063 1063 1064 1064 ret = xpc_get_remote_rp(nasid, discovered_nasids, 1065 1065 remote_rp, &remote_rp_pa); 1066 - if (ret != xpcSuccess) { 1066 + if (ret != xpSuccess) { 1067 1067 dev_dbg(xpc_part, "unable to get reserved page " 1068 1068 "from nasid %d, reason=%d\n", nasid, 1069 1069 ret); 1070 1070 1071 - if (ret == xpcLocalPartid) 1071 + if (ret == xpLocalPartid) 1072 1072 break; 1073 1073 1074 1074 continue; ··· 1082 1082 /* pull over the cross partition variables */ 1083 1083 1084 1084 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 1085 - if (ret != xpcSuccess) { 1085 + if (ret != xpSuccess) { 1086 1086 dev_dbg(xpc_part, "unable to get XPC variables " 1087 1087 "from nasid %d, reason=%d\n", nasid, 1088 1088 ret); ··· 1116 1116 "register xp_addr region 0x%016lx\n", 1117 1117 partid, remote_vars->amos_page_pa); 1118 1118 1119 - XPC_SET_REASON(part, xpcPhysAddrRegFailed, 1119 + XPC_SET_REASON(part, xpPhysAddrRegFailed, 1120 1120 __LINE__); 1121 1121 break; 1122 1122 } ··· 1151 1151 * Given a partid, get the nasids owned by that partition from the 1152 1152 * remote partition's reserved page. 1153 1153 */ 1154 - enum xpc_retval 1154 + enum xp_retval 1155 1155 xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) 1156 1156 { 1157 1157 struct xpc_partition *part; ··· 1160 1160 1161 1161 part = &xpc_partitions[partid]; 1162 1162 if (part->remote_rp_pa == 0) 1163 - return xpcPartitionDown; 1163 + return xpPartitionDown; 1164 1164 1165 1165 memset(nasid_mask, 0, XP_NASID_MASK_BYTES); 1166 1166
+9 -9
drivers/misc/sgi-xp/xpnet.c
··· 282 282 * state or message reception on a connection. 283 283 */ 284 284 static void 285 - xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, 285 + xpnet_connection_activity(enum xp_retval reason, partid_t partid, int channel, 286 286 void *data, void *key) 287 287 { 288 288 long bp; ··· 291 291 DBUG_ON(channel != XPC_NET_CHANNEL); 292 292 293 293 switch (reason) { 294 - case xpcMsgReceived: /* message received */ 294 + case xpMsgReceived: /* message received */ 295 295 DBUG_ON(data == NULL); 296 296 297 297 xpnet_receive(partid, channel, (struct xpnet_message *)data); 298 298 break; 299 299 300 - case xpcConnected: /* connection completed to a partition */ 300 + case xpConnected: /* connection completed to a partition */ 301 301 spin_lock_bh(&xpnet_broadcast_lock); 302 302 xpnet_broadcast_partitions |= 1UL << (partid - 1); 303 303 bp = xpnet_broadcast_partitions; ··· 330 330 static int 331 331 xpnet_dev_open(struct net_device *dev) 332 332 { 333 - enum xpc_retval ret; 333 + enum xp_retval ret; 334 334 335 335 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " 336 336 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, ··· 340 340 ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, 341 341 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, 342 342 XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); 343 - if (ret != xpcSuccess) { 343 + if (ret != xpSuccess) { 344 344 dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " 345 345 "ret=%d\n", dev->name, ret); 346 346 ··· 407 407 * release the skb and then release our pending message structure. 
408 408 */ 409 409 static void 410 - xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, 410 + xpnet_send_completed(enum xp_retval reason, partid_t partid, int channel, 411 411 void *__qm) 412 412 { 413 413 struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm; ··· 439 439 xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 440 440 { 441 441 struct xpnet_pending_msg *queued_msg; 442 - enum xpc_retval ret; 442 + enum xp_retval ret; 443 443 struct xpnet_message *msg; 444 444 u64 start_addr, end_addr; 445 445 long dp; ··· 528 528 529 529 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 530 530 XPC_NOWAIT, (void **)&msg); 531 - if (unlikely(ret != xpcSuccess)) 531 + if (unlikely(ret != xpSuccess)) 532 532 continue; 533 533 534 534 msg->embedded_bytes = embedded_bytes; ··· 557 557 558 558 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, 559 559 xpnet_send_completed, queued_msg); 560 - if (unlikely(ret != xpcSuccess)) { 560 + if (unlikely(ret != xpSuccess)) { 561 561 atomic_dec(&queued_msg->use_count); 562 562 continue; 563 563 }