Commit 65c17b80 authored by Dean Nelson, committed by Linus Torvalds

drivers/misc/sgi-xp: clean up return values

Make XP return values generic to XP rather than tied to XPC by changing
enum xpc_retval to xp_retval, along with changing the return value prefix from
xpc to xp.  Also, clean up a comment block that referenced some of these return
values, as well as the handling of BTE-related return values.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0cf942d7
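To make the scope of the rename concrete, below is a minimal illustrative sketch of a caller before and after this commit, condensed from the xpnet_dev_open() hunk further down. Only the enum type name and the value prefix change; the call itself is untouched. The error-handling body is elided and marked as such; this is not code from the commit.

	/* before this commit (compare the xpnet_dev_open() hunk below) */
	enum xpc_retval ret;

	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
	if (ret != xpcSuccess) {
		/* handle the failure (details elided; illustrative only) */
	}

	/* after this commit: only the return type and value prefix change */
	enum xp_retval ret;

	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
	if (ret != xpSuccess) {
		/* handle the failure (details elided; illustrative only) */
	}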
This diff is collapsed.
@@ -42,21 +42,21 @@ EXPORT_SYMBOL_GPL(xpc_registrations);
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_notloaded(void)
 {
-	return xpcNotLoaded;
+	return xpNotLoaded;
 }
 
 struct xpc_interface xpc_interface = {
 	(void (*)(int))xpc_notloaded,
 	(void (*)(int))xpc_notloaded,
-	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
-	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
-	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+	(enum xp_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xp_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xp_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
 	    xpc_notloaded,
 	(void (*)(partid_t, int, void *))xpc_notloaded,
-	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
+	(enum xp_retval(*)(partid_t, void *))xpc_notloaded
 };
 EXPORT_SYMBOL_GPL(xpc_interface);
@@ -66,12 +66,12 @@ EXPORT_SYMBOL_GPL(xpc_interface);
 void
 xpc_set_interface(void (*connect) (int),
 		  void (*disconnect) (int),
-		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
-		  enum xpc_retval (*send) (partid_t, int, void *),
-		  enum xpc_retval (*send_notify) (partid_t, int, void *,
+		  enum xp_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xp_retval (*send) (partid_t, int, void *),
+		  enum xp_retval (*send_notify) (partid_t, int, void *,
 						  xpc_notify_func, void *),
 		  void (*received) (partid_t, int, void *),
-		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
+		  enum xp_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
@@ -91,16 +91,16 @@ xpc_clear_interface(void)
 {
 	xpc_interface.connect = (void (*)(int))xpc_notloaded;
 	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+	xpc_interface.allocate = (enum xp_retval(*)(partid_t, int, u32,
 						     void **))xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	xpc_interface.send = (enum xp_retval(*)(partid_t, int, void *))
 	    xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+	xpc_interface.send_notify = (enum xp_retval(*)(partid_t, int, void *,
 							xpc_notify_func,
 							void *))xpc_notloaded;
 	xpc_interface.received = (void (*)(partid_t, int, void *))
 	    xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+	xpc_interface.partid_to_nasids = (enum xp_retval(*)(partid_t, void *))
 	    xpc_notloaded;
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
@@ -123,13 +123,13 @@ EXPORT_SYMBOL_GPL(xpc_clear_interface);
  *	nentries - max #of XPC message entries a message queue can contain.
  *		The actual number, which is determined when a connection
  *		is established and may be less then requested, will be
- *		passed to the user via the xpcConnected callout.
+ *		passed to the user via the xpConnected callout.
  *	assigned_limit - max number of kthreads allowed to be processing
  *		messages (per connection) at any given instant.
  *	idle_limit - max number of kthreads allowed to be idle at any given
  *		instant.
  */
-enum xpc_retval
+enum xp_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
@@ -143,12 +143,12 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	registration = &xpc_registrations[ch_number];
 
 	if (mutex_lock_interruptible(&registration->mutex) != 0)
-		return xpcInterrupted;
+		return xpInterrupted;
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
 		mutex_unlock(&registration->mutex);
-		return xpcAlreadyRegistered;
+		return xpAlreadyRegistered;
 	}
 
 	/* register the channel for connection */
@@ -163,7 +163,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	xpc_interface.connect(ch_number);
 
-	return xpcSuccess;
+	return xpSuccess;
 }
 EXPORT_SYMBOL_GPL(xpc_connect);
...
@@ -412,7 +412,7 @@ struct xpc_channel {
 	spinlock_t lock;	/* lock for updating this structure */
 	u32 flags;		/* general flags */
 
-	enum xpc_retval reason;	/* reason why channel is disconnect'g */
+	enum xp_retval reason;	/* reason why channel is disconnect'g */
 	int reason_line;	/* line# disconnect initiated from */
 
 	u16 number;		/* channel # */
@@ -522,7 +522,7 @@ struct xpc_partition {
 	spinlock_t act_lock;	/* protect updating of act_state */
 	u8 act_state;		/* from XPC HB viewpoint */
 	u8 remote_vars_version;	/* version# of partition's vars */
-	enum xpc_retval reason;	/* reason partition is deactivating */
+	enum xp_retval reason;	/* reason partition is deactivating */
 	int reason_line;	/* line# deactivation initiated from */
 	int reactivate_nasid;	/* nasid in partition to reactivate */
 
@@ -646,31 +646,31 @@ extern void xpc_allow_IPI_ops(void);
 extern void xpc_restrict_IPI_ops(void);
 extern int xpc_identify_act_IRQ_sender(void);
 extern int xpc_partition_disengaged(struct xpc_partition *);
-extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
+extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
 extern void xpc_check_remote_hb(void);
 extern void xpc_deactivate_partition(const int, struct xpc_partition *,
-				     enum xpc_retval);
-extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
+				     enum xp_retval);
+extern enum xp_retval xpc_initiate_partid_to_nasids(partid_t, void *);
 
 /* found in xpc_channel.c */
 extern void xpc_initiate_connect(int);
 extern void xpc_initiate_disconnect(int);
-extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
-extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
-extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
+extern enum xp_retval xpc_initiate_allocate(partid_t, int, u32, void **);
+extern enum xp_retval xpc_initiate_send(partid_t, int, void *);
+extern enum xp_retval xpc_initiate_send_notify(partid_t, int, void *,
 						xpc_notify_func, void *);
 extern void xpc_initiate_received(partid_t, int, void *);
-extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
-extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
+extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *);
+extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
 extern void xpc_process_channel_activity(struct xpc_partition *);
 extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
-				   enum xpc_retval, unsigned long *);
-extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
-extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
+				   enum xp_retval, unsigned long *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
 
 static inline void
@@ -901,7 +901,7 @@ xpc_IPI_receive(AMO_t *amo)
 	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
 }
 
-static inline enum xpc_retval
+static inline enum xp_retval
 xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 {
 	int ret = 0;
@@ -923,7 +923,7 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 	local_irq_restore(irq_flags);
 
-	return ((ret == 0) ? xpcSuccess : xpcPioReadError);
+	return ((ret == 0) ? xpSuccess : xpPioReadError);
 }
 
 /*
@@ -992,7 +992,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
 		    unsigned long *irq_flags)
 {
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
 	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
 		ret = xpc_IPI_send(part->remote_IPI_amo_va,
@@ -1001,7 +1001,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
 				   part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
 		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
 			ipi_flag_string, ch->partid, ch->number, ret);
-		if (unlikely(ret != xpcSuccess)) {
+		if (unlikely(ret != xpSuccess)) {
 			if (irq_flags != NULL)
 				spin_unlock_irqrestore(&ch->lock, *irq_flags);
 			XPC_DEACTIVATE_PARTITION(part, ret);
@@ -1123,41 +1123,10 @@ xpc_IPI_init(int index)
 	return amo;
 }
 
-static inline enum xpc_retval
+static inline enum xp_retval
 xpc_map_bte_errors(bte_result_t error)
 {
-	if (error == BTE_SUCCESS)
-		return xpcSuccess;
-
-	if (is_shub2()) {
-		if (BTE_VALID_SH2_ERROR(error))
-			return xpcBteSh2Start + error;
-		return xpcBteUnmappedError;
-	}
-
-	switch (error) {
-	case BTE_SUCCESS:
-		return xpcSuccess;
-	case BTEFAIL_DIR:
-		return xpcBteDirectoryError;
-	case BTEFAIL_POISON:
-		return xpcBtePoisonError;
-	case BTEFAIL_WERR:
-		return xpcBteWriteError;
-	case BTEFAIL_ACCESS:
-		return xpcBteAccessError;
-	case BTEFAIL_PWERR:
-		return xpcBtePWriteError;
-	case BTEFAIL_PRERR:
-		return xpcBtePReadError;
-	case BTEFAIL_TOUT:
-		return xpcBteTimeOutError;
-	case BTEFAIL_XTERR:
-		return xpcBteXtalkError;
-	case BTEFAIL_NOTAVAIL:
-		return xpcBteNotAvailable;
-	default:
-		return xpcBteUnmappedError;
-	}
+	return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError);
 }
 
 /*
...
This diff is collapsed.
@@ -315,13 +315,13 @@ xpc_initiate_discovery(void *ignore)
  * the XPC per partition variables from the remote partition and waiting for
  * the remote partition to pull ours.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_make_first_contact(struct xpc_partition *part)
 {
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
-	while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
-		if (ret != xpcRetry) {
+	while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
+		if (ret != xpRetry) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
 			return ret;
 		}
@@ -406,7 +406,7 @@ xpc_partition_up(struct xpc_partition *part)
 	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
 
-	if (xpc_setup_infrastructure(part) != xpcSuccess)
+	if (xpc_setup_infrastructure(part) != xpSuccess)
 		return;
 
 	/*
@@ -418,7 +418,7 @@ xpc_partition_up(struct xpc_partition *part)
 	(void)xpc_part_ref(part);	/* this will always succeed */
 
-	if (xpc_make_first_contact(part) == xpcSuccess)
+	if (xpc_make_first_contact(part) == xpSuccess)
 		xpc_channel_mgr(part);
 
 	xpc_part_deref(part);
@@ -470,7 +470,7 @@ xpc_activating(void *__partid)
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
-		XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
+		XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
 		part->remote_rp_pa = 0;
 		return 0;
@@ -488,7 +488,7 @@ xpc_activating(void *__partid)
 	xpc_disallow_hb(partid, xpc_vars);
 	xpc_mark_partition_inactive(part);
 
-	if (part->reason == xpcReactivating) {
+	if (part->reason == xpReactivating) {
 		/* interrupting ourselves results in activating partition */
 		xpc_IPI_send_reactivate(part);
 	}
@@ -508,7 +508,7 @@ xpc_activate_partition(struct xpc_partition *part)
 	DBUG_ON(part->act_state != XPC_P_INACTIVE);
 
 	part->act_state = XPC_P_ACTIVATION_REQ;
-	XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
+	XPC_SET_REASON(part, xpCloneKThread, __LINE__);
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -517,7 +517,7 @@ xpc_activate_partition(struct xpc_partition *part)
 	if (IS_ERR(kthread)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
-		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
+		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
 	}
 }
@@ -696,7 +696,7 @@ xpc_kthread_start(void *args)
 			ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-			xpc_disconnect_callout(ch, xpcDisconnecting);
+			xpc_disconnect_callout(ch, xpDisconnecting);
 
 			spin_lock_irqsave(&ch->lock, irq_flags);
 			ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
@@ -776,7 +776,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			 * then we'll deadlock if all other kthreads assigned
 			 * to this channel are blocked in the channel's
 			 * registerer, because the only thing that will unblock
-			 * them is the xpcDisconnecting callout that this
+			 * them is the xpDisconnecting callout that this
 			 * failed kthread_run() would have made.
 			 */
@@ -796,7 +796,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 				 * to function.
 				 */
 				spin_lock_irqsave(&ch->lock, irq_flags);
-				XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
+				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
 						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 			}
@@ -857,7 +857,7 @@ xpc_disconnect_wait(int ch_number)
 }
 
 static void
-xpc_do_exit(enum xpc_retval reason)
+xpc_do_exit(enum xp_retval reason)
 {
 	partid_t partid;
 	int active_part_count, printed_waiting_msg = 0;
@@ -955,7 +955,7 @@ xpc_do_exit(enum xpc_retval reason)
 	del_timer_sync(&xpc_hb_timer);
 	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
 
-	if (reason == xpcUnloading) {
+	if (reason == xpUnloading) {
 		/* take ourselves off of the reboot_notifier_list */
 		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
@@ -981,20 +981,20 @@ xpc_do_exit(enum xpc_retval reason)
 static int
 xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
 {
-	enum xpc_retval reason;
+	enum xp_retval reason;
 
 	switch (event) {
 	case SYS_RESTART:
-		reason = xpcSystemReboot;
+		reason = xpSystemReboot;
 		break;
 	case SYS_HALT:
-		reason = xpcSystemHalt;
+		reason = xpSystemHalt;
 		break;
 	case SYS_POWER_OFF:
-		reason = xpcSystemPoweroff;
+		reason = xpSystemPoweroff;
 		break;
 	default:
-		reason = xpcSystemGoingDown;
+		reason = xpSystemGoingDown;
 	}
 
 	xpc_do_exit(reason);
@@ -1279,7 +1279,7 @@ xpc_init(void)
 		/* mark this new thread as a non-starter */
 		complete(&xpc_discovery_exited);
 
-		xpc_do_exit(xpcUnloading);
+		xpc_do_exit(xpUnloading);
 		return -EBUSY;
 	}
@@ -1297,7 +1297,7 @@ module_init(xpc_init);
 void __exit
 xpc_exit(void)
 {
-	xpc_do_exit(xpcUnloading);
+	xpc_do_exit(xpUnloading);
 }
 
 module_exit(xpc_exit);
...
@@ -444,7 +444,7 @@ xpc_check_remote_hb(void)
 		     (remote_vars->heartbeat_offline == 0)) ||
 		    !xpc_hb_allowed(sn_partition_id, remote_vars)) {
 
-			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
+			XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
 			continue;
 		}
@@ -459,7 +459,7 @@ xpc_check_remote_hb(void)
  * is large enough to contain a copy of their reserved page header and
  * part_nasids mask.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 		  struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
 {
@@ -469,7 +469,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
 	if (*remote_rp_pa == 0)
-		return xpcNoRsvdPageAddr;
+		return xpNoRsvdPageAddr;
 
 	/* pull over the reserved page header and part_nasids mask */
 	bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
@@ -489,18 +489,18 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 	if (remote_rp->partid < 1 ||
 	    remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
-		return xpcInvalidPartid;
+		return xpInvalidPartid;
 	}
 
 	if (remote_rp->partid == sn_partition_id)
-		return xpcLocalPartid;
+		return xpLocalPartid;
 
 	if (XPC_VERSION_MAJOR(remote_rp->version) !=
 	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
-		return xpcBadVersion;
+		return xpBadVersion;
 	}
 
-	return xpcSuccess;
+	return xpSuccess;
 }
 
 /*
@@ -509,13 +509,13 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
  * remote_vars points to a buffer that is cacheline aligned for BTE copies and
  * assumed to be of size XPC_RP_VARS_SIZE.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	int bres;
 
 	if (remote_vars_pa == 0)
-		return xpcVarsNotSet;
+		return xpVarsNotSet;
 
 	/* pull over the cross partition variables */
 	bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
@@ -525,10 +525,10 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 
 	if (XPC_VERSION_MAJOR(remote_vars->version) !=
 	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
-		return xpcBadVersion;
+		return xpBadVersion;
 	}
 
-	return xpcSuccess;
+	return xpSuccess;
 }
 
 /*
@@ -606,14 +606,14 @@ xpc_identify_act_IRQ_req(int nasid)
 	struct timespec remote_rp_stamp = { 0, 0 };
 	partid_t partid;
 	struct xpc_partition *part;
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
 	/* pull over the reserved page structure */
 
 	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
-	if (ret != xpcSuccess) {
+	if (ret != xpSuccess) {
 		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
 			 "which sent interrupt, reason=%d\n", nasid, ret);
 		return;
@@ -632,7 +632,7 @@ xpc_identify_act_IRQ_req(int nasid)
 	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
-	if (ret != xpcSuccess) {
+	if (ret != xpSuccess) {
 		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
 			 "which sent interrupt, reason=%d\n", nasid, ret);
 
@@ -699,7 +699,7 @@ xpc_identify_act_IRQ_req(int nasid)
 					      &remote_rp_stamp, remote_rp_pa,
 					      remote_vars_pa, remote_vars);
 		part->reactivate_nasid = nasid;
-		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
 		return;
 	}
@@ -754,11 +754,11 @@ xpc_identify_act_IRQ_req(int nasid)
 
 	if (reactivate) {
 		part->reactivate_nasid = nasid;
-		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
 
 	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
 		   xpc_partition_disengage_requested(1UL << partid)) {
-		XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
+		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
 	}
 }
@@ -870,20 +870,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
 /*
  * Mark specified partition as active.
  */
-enum xpc_retval
+enum xp_retval
 xpc_mark_partition_active(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
 	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 	if (part->act_state == XPC_P_ACTIVATING) {
 		part->act_state = XPC_P_ACTIVE;
-		ret = xpcSuccess;
+		ret = xpSuccess;
 	} else {
-		DBUG_ON(part->reason == xpcSuccess);
+		DBUG_ON(part->reason == xpSuccess);
 		ret = part->reason;
 	}
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -896,7 +896,7 @@ xpc_mark_partition_active(struct xpc_partition *part)
  */
 void
 xpc_deactivate_partition(const int line, struct xpc_partition *part,
-			 enum xpc_retval reason)
+			 enum xp_retval reason)
 {
 	unsigned long irq_flags;
@@ -905,15 +905,15 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 	if (part->act_state == XPC_P_INACTIVE) {
 		XPC_SET_REASON(part, reason, line);
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
-		if (reason == xpcReactivating) {
+		if (reason == xpReactivating) {
 			/* we interrupt ourselves to reactivate partition */
 			xpc_IPI_send_reactivate(part);
 		}
 		return;
 	}
 	if (part->act_state == XPC_P_DEACTIVATING) {
-		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
-		    reason == xpcReactivating) {
+		if ((part->reason == xpUnloading && reason != xpUnloading) ||
+		    reason == xpReactivating) {
 			XPC_SET_REASON(part, reason, line);
 		}
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -985,7 +985,7 @@ xpc_discovery(void)
 	partid_t partid;
 	struct xpc_partition *part;
 	u64 *discovered_nasids;
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
 	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
 						  xp_nasid_mask_bytes,
@@ -1063,12 +1063,12 @@ xpc_discovery(void)
 			ret = xpc_get_remote_rp(nasid, discovered_nasids,
 						remote_rp, &remote_rp_pa);
-			if (ret != xpcSuccess) {
+			if (ret != xpSuccess) {
 				dev_dbg(xpc_part, "unable to get reserved page "
 					"from nasid %d, reason=%d\n", nasid,
 					ret);
 
-				if (ret == xpcLocalPartid)
+				if (ret == xpLocalPartid)
 					break;
 
 				continue;
@@ -1082,7 +1082,7 @@ xpc_discovery(void)
 			/* pull over the cross partition variables */
 
 			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
-			if (ret != xpcSuccess) {
+			if (ret != xpSuccess) {
 				dev_dbg(xpc_part, "unable to get XPC variables "
 					"from nasid %d, reason=%d\n", nasid,
 					ret);
@@ -1116,7 +1116,7 @@ xpc_discovery(void)
 					"register xp_addr region 0x%016lx\n",
 					partid, remote_vars->amos_page_pa);
 
-				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
+				XPC_SET_REASON(part, xpPhysAddrRegFailed,
 					       __LINE__);
 				break;
 			}
@@ -1151,7 +1151,7 @@ xpc_discovery(void)
  * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
-enum xpc_retval
+enum xp_retval
 xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 {
 	struct xpc_partition *part;
@@ -1160,7 +1160,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 	part = &xpc_partitions[partid];
 	if (part->remote_rp_pa == 0)
-		return xpcPartitionDown;
+		return xpPartitionDown;
 
 	memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
...
@@ -282,7 +282,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
 * state or message reception on a connection.
 */
static void
-xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_connection_activity(enum xp_retval reason, partid_t partid, int channel,
			  void *data, void *key)
{
	long bp;
@@ -291,13 +291,13 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
 	DBUG_ON(channel != XPC_NET_CHANNEL);
 
 	switch (reason) {
-	case xpcMsgReceived:	/* message received */
+	case xpMsgReceived:	/* message received */
 		DBUG_ON(data == NULL);
 
 		xpnet_receive(partid, channel, (struct xpnet_message *)data);
 		break;
 
-	case xpcConnected:	/* connection completed to a partition */
+	case xpConnected:	/* connection completed to a partition */
 		spin_lock_bh(&xpnet_broadcast_lock);
 		xpnet_broadcast_partitions |= 1UL << (partid - 1);
 		bp = xpnet_broadcast_partitions;
@@ -330,7 +330,7 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
 static int
 xpnet_dev_open(struct net_device *dev)
 {
-	enum xpc_retval ret;
+	enum xp_retval ret;
 
 	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
 		"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
@@ -340,7 +340,7 @@ xpnet_dev_open(struct net_device *dev)
 	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
 			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
 			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
-	if (ret != xpcSuccess) {
+	if (ret != xpSuccess) {
 		dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
 			"ret=%d\n", dev->name, ret);
@@ -407,7 +407,7 @@ xpnet_dev_get_stats(struct net_device *dev)
 * release the skb and then release our pending message structure.
 */
static void
-xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_send_completed(enum xp_retval reason, partid_t partid, int channel,
		     void *__qm)
{
	struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
@@ -439,7 +439,7 @@ static int
 xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xpnet_pending_msg *queued_msg;
-	enum xpc_retval ret;
+	enum xp_retval ret;
 	struct xpnet_message *msg;
 	u64 start_addr, end_addr;
 	long dp;
@@ -528,7 +528,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
 				   XPC_NOWAIT, (void **)&msg);
-		if (unlikely(ret != xpcSuccess))
+		if (unlikely(ret != xpSuccess))
 			continue;
 
 		msg->embedded_bytes = embedded_bytes;
@@ -557,7 +557,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
 				      xpnet_send_completed, queued_msg);
-		if (unlikely(ret != xpcSuccess)) {
+		if (unlikely(ret != xpSuccess)) {
 			atomic_dec(&queued_msg->use_count);
 			continue;
 		}
...