Commit 393d2cc3 authored by Corey Minyard, committed by Linus Torvalds

[PATCH] ipmi: use refcount in message handler

This patch is rather large, but it really can't be done in smaller chunks
easily, and I believe it is an important change.  It has been out and tested
for a while in the latest IPMI driver release.  There are no functional
changes, just the changes necessary to convert the locking over (and a few
minor style updates).

The IPMI driver uses read/write locks to ensure that users and interfaces do
not go away while they are in use.  This is bad from a number of points of
view; for one, it means a user cannot be added or deleted from within a
receive callback.  This patch removes the rwlocks and instead manages those
lifetimes with refcounts (kref) and RCU-protected lists.
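For readers unfamiliar with the pattern, the conversion boils down to the
sketch below.  This is not code from the driver; the names (my_obj, my_objs,
my_objs_lock) are invented for illustration, and it assumes a kernel context
where kref and RCU are available.  Readers walk the list under rcu_read_lock()
and take a reference before using an entry outside the read-side section;
writers serialize on a spinlock, unlink with list_del_rcu(), wait a grace
period, and then drop the list's reference:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
        struct list_head link;     /* entry on the RCU-protected list */
        struct kref      refcount; /* pins the object itself */
};

static LIST_HEAD(my_objs);
static DEFINE_SPINLOCK(my_objs_lock);  /* serializes writers only */

static void my_obj_release(struct kref *ref)
{
        kfree(container_of(ref, struct my_obj, refcount));
}

/* Reader: no lock is held while the caller uses the object, just a ref. */
static struct my_obj *my_obj_get_first(void)
{
        struct my_obj *obj, *found = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(obj, &my_objs, link) {
                found = obj;
                kref_get(&found->refcount); /* keep it past the RCU section */
                break;
        }
        rcu_read_unlock();
        return found;
}

/* Writer: unlink, wait for readers to drain, then drop the list's ref. */
static void my_obj_remove(struct my_obj *obj)
{
        spin_lock(&my_objs_lock);
        list_del_rcu(&obj->link);
        spin_unlock(&my_objs_lock);
        synchronize_rcu();
        kref_put(&obj->refcount, my_obj_release);
}

The patch applies this same shape to both the per-interface user list and the
per-interface command-receiver list; the only difference is which lock
serializes the writers (seq_lock for users, cmd_rcvrs_lock for command
receivers).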
Signed-off-by: Corey Minyard <minyard@acm.org>
Cc: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f5b3db00
@@ -38,13 +38,13 @@
 #include <linux/sched.h>
 #include <linux/poll.h>
 #include <linux/spinlock.h>
-#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/ipmi.h>
 #include <linux/ipmi_smi.h>
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <linux/rcupdate.h>

 #define PFX "IPMI message handler: "
@@ -65,10 +65,19 @@ struct proc_dir_entry *proc_ipmi_root = NULL;
    the max message timer.  This is in milliseconds. */
 #define MAX_MSG_TIMEOUT 60000

+/*
+ * The main "user" data structure.
+ */
 struct ipmi_user
 {
        struct list_head link;

+       /* Set to "0" when the user is destroyed. */
+       int valid;
+
+       struct kref refcount;
+
        /* The upper layer that handles receive messages. */
        struct ipmi_user_hndl *handler;
        void                  *handler_data;

@@ -87,6 +96,15 @@ struct cmd_rcvr
        ipmi_user_t   user;
        unsigned char netfn;
        unsigned char cmd;
+
+       /* This is used to form a linked list during mass deletion.
+        * Since this is in an RCU list, we cannot use the link above
+        * or change any data until the RCU period completes.  So we
+        * use this next variable during mass deletion so we can have
+        * a list and don't have to wait and restart the search on
+        * every individual deletion of a command. */
+       struct cmd_rcvr *next;
 };

 struct seq_table
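The deferred-free scheme that comment describes shows up later in
ipmi_destroy_user().  Pulled out of the diff, the pattern looks roughly like
this (a sketch; should_remove() is a hypothetical stand-in for the real
predicate, which is rcvr->user == user):

struct cmd_rcvr  *rcvr;
struct cmd_rcvr  *rcvrs = NULL;   /* private chain built through ->next */
struct list_head *entry1, *entry2;
unsigned long    flags;

spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
        rcvr = list_entry(entry1, struct cmd_rcvr, link);
        if (should_remove(rcvr)) {         /* hypothetical predicate */
                list_del_rcu(&rcvr->link); /* readers may still see it */
                rcvr->next = rcvrs;        /* so chain it on ->next ... */
                rcvrs = rcvr;
        }
}
spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);

synchronize_rcu();                 /* ... wait out the RCU readers ... */
while (rcvrs) {                    /* ... and only then free the chain */
        rcvr = rcvrs;
        rcvrs = rcvr->next;
        kfree(rcvr);
}

Building the chain through next rather than reusing link matters because the
link field must stay intact until synchronize_rcu() returns; a concurrent
reader may be walking through a deleted node at that very moment.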
@@ -150,12 +168,10 @@ struct ipmi_smi
        /* What interface number are we? */
        int intf_num;

-       /* The list of upper layers that are using me.  We read-lock
-          this when delivering messages to the upper layer to keep
-          the user from going away while we are processing the
-          message.  This means that you cannot add or delete a user
-          from the receive callback. */
-       rwlock_t users_lock;
+       struct kref refcount;
+
+       /* The list of upper layers that are using me.  seq_lock
+        * protects this. */
        struct list_head users;

        /* Used for wake ups at startup. */

@@ -193,7 +209,7 @@ struct ipmi_smi
        /* The list of command receivers that are registered for commands
           on this interface. */
-       rwlock_t cmd_rcvr_lock;
+       spinlock_t cmd_rcvrs_lock;
        struct list_head cmd_rcvrs;

        /* Events that were queues because no one was there to receive

@@ -296,16 +312,17 @@ struct ipmi_smi
        unsigned int events;
 };

+/* Used to mark an interface entry that cannot be used but is not a
+ * free entry, either, primarily used at creation and deletion time so
+ * a slot doesn't get reused too quickly. */
+#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
+#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
+                                   || (i == IPMI_INVALID_INTERFACE_ENTRY))
+
 #define MAX_IPMI_INTERFACES 4
 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];

-/* Used to keep interfaces from going away while operations are
-   operating on interfaces.  Grab read if you are not modifying the
-   interfaces, write if you are. */
-static DECLARE_RWSEM(interfaces_sem);
-
-/* Directly protects the ipmi_interfaces data structure.  This is
-   claimed in the timer interrupt. */
+/* Directly protects the ipmi_interfaces data structure. */
 static DEFINE_SPINLOCK(interfaces_lock);

 /* List of watchers that want to know when smi's are added and
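The reserved-entry value is how the patch keeps a slot in ipmi_interfaces[]
from being reused while an interface is half-created or half-destroyed,
without holding interfaces_lock for the whole operation.  Condensed from the
registration and unregistration paths later in the diff (a sketch, assuming
i, rv, intf and flags are in scope), the usage is roughly:

/* Registration: claim a free slot, then publish it or give it back. */
spin_lock_irqsave(&interfaces_lock, flags);
ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; /* reserve the slot */
spin_unlock_irqrestore(&interfaces_lock, flags);

/* ... set the interface up without the lock held ... */

spin_lock_irqsave(&interfaces_lock, flags);
ipmi_interfaces[i] = rv ? NULL : intf;             /* publish or release */
spin_unlock_irqrestore(&interfaces_lock, flags);

Scanners such as the timeout handler and the panic-event code then test
IPMI_INVALID_INTERFACE() so that they skip both empty and reserved slots.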
@@ -313,20 +330,73 @@ static DEFINE_SPINLOCK(interfaces_lock);
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
 static DECLARE_RWSEM(smi_watchers_sem);

+static void free_recv_msg_list(struct list_head *q)
+{
+        struct ipmi_recv_msg *msg, *msg2;
+
+        list_for_each_entry_safe(msg, msg2, q, link) {
+                list_del(&msg->link);
+                ipmi_free_recv_msg(msg);
+        }
+}
+
+static void clean_up_interface_data(ipmi_smi_t intf)
+{
+        int              i;
+        struct cmd_rcvr  *rcvr, *rcvr2;
+        unsigned long    flags;
+        struct list_head list;
+
+        free_recv_msg_list(&intf->waiting_msgs);
+        free_recv_msg_list(&intf->waiting_events);
+
+        /* Wholesale remove all the entries from the list in the
+         * interface and wait for RCU to know that none are in use. */
+        spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
+        list_add_rcu(&list, &intf->cmd_rcvrs);
+        list_del_rcu(&intf->cmd_rcvrs);
+        spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
+        synchronize_rcu();
+        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+                kfree(rcvr);
+
+        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+                if ((intf->seq_table[i].inuse)
+                    && (intf->seq_table[i].recv_msg))
+                {
+                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+                }
+        }
+}
+
+static void intf_free(struct kref *ref)
+{
+        ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
+
+        clean_up_interface_data(intf);
+        kfree(intf);
+}
+
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
        int           i;
+       unsigned long flags;

-       down_read(&interfaces_sem);
        down_write(&smi_watchers_sem);
        list_add(&(watcher->link), &smi_watchers);
+       up_write(&smi_watchers_sem);
+       spin_lock_irqsave(&interfaces_lock, flags);
        for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               if (ipmi_interfaces[i] != NULL) {
-                       watcher->new_smi(i);
-               }
+               ipmi_smi_t intf = ipmi_interfaces[i];
+               if (IPMI_INVALID_INTERFACE(intf))
+                       continue;
+               spin_unlock_irqrestore(&interfaces_lock, flags);
+               watcher->new_smi(i);
+               spin_lock_irqsave(&interfaces_lock, flags);
        }
-       up_write(&smi_watchers_sem);
-       up_read(&interfaces_sem);
+       spin_unlock_irqrestore(&interfaces_lock, flags);
        return 0;
 }
@@ -471,8 +541,8 @@ static void deliver_response(struct ipmi_recv_msg *msg)
                }
                ipmi_free_recv_msg(msg);
        } else {
-               msg->user->handler->ipmi_recv_hndl(msg,
-                                                  msg->user->handler_data);
+               ipmi_user_t user = msg->user;
+               user->handler->ipmi_recv_hndl(msg, user->handler_data);
        }
 }
@@ -662,15 +732,18 @@ int ipmi_create_user(unsigned int if_num,
        if (! new_user)
                return -ENOMEM;

-       down_read(&interfaces_sem);
-       if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
-       {
-               rv = -EINVAL;
-               goto out_unlock;
+       spin_lock_irqsave(&interfaces_lock, flags);
+       intf = ipmi_interfaces[if_num];
+       if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
+               spin_unlock_irqrestore(&interfaces_lock, flags);
+               return -EINVAL;
        }

-       intf = ipmi_interfaces[if_num];
+       /* Note that each existing user holds a refcount to the interface. */
+       kref_get(&intf->refcount);
+       spin_unlock_irqrestore(&interfaces_lock, flags);

+       kref_init(&new_user->refcount);
        new_user->handler = handler;
        new_user->handler_data = handler_data;
        new_user->intf = intf;

@@ -678,98 +751,92 @@ int ipmi_create_user(unsigned int if_num,
        if (!try_module_get(intf->handlers->owner)) {
                rv = -ENODEV;
-               goto out_unlock;
+               goto out_err;
        }

        if (intf->handlers->inc_usecount) {
                rv = intf->handlers->inc_usecount(intf->send_info);
                if (rv) {
                        module_put(intf->handlers->owner);
-                       goto out_unlock;
+                       goto out_err;
                }
        }

-       write_lock_irqsave(&intf->users_lock, flags);
-       list_add_tail(&new_user->link, &intf->users);
-       write_unlock_irqrestore(&intf->users_lock, flags);
-
- out_unlock:
-       if (rv) {
-               kfree(new_user);
-       } else {
-               *user = new_user;
-       }
-       up_read(&interfaces_sem);
+       new_user->valid = 1;
+       spin_lock_irqsave(&intf->seq_lock, flags);
+       list_add_rcu(&new_user->link, &intf->users);
+       spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+       *user = new_user;
+       return 0;
+
+ out_err:
+       kfree(new_user);
+       kref_put(&intf->refcount, intf_free);
        return rv;
 }
-static int ipmi_destroy_user_nolock(ipmi_user_t user)
+static void free_user(struct kref *ref)
+{
+        ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
+        kfree(user);
+}
+
+int ipmi_destroy_user(ipmi_user_t user)
 {
        int              rv = -ENODEV;
-       ipmi_user_t      t_user;
-       struct cmd_rcvr  *rcvr, *rcvr2;
+       ipmi_smi_t       intf = user->intf;
        int              i;
        unsigned long    flags;
+       struct cmd_rcvr  *rcvr;
+       struct list_head *entry1, *entry2;
+       struct cmd_rcvr  *rcvrs = NULL;

-       /* Find the user and delete them from the list. */
-       list_for_each_entry(t_user, &(user->intf->users), link) {
-               if (t_user == user) {
-                       list_del(&t_user->link);
-                       rv = 0;
-                       break;
-               }
-       }
+       user->valid = 1;

-       if (rv) {
-               goto out_unlock;
-       }
-
-       /* Remove the user from the interfaces sequence table. */
-       spin_lock_irqsave(&(user->intf->seq_lock), flags);
+       /* Remove the user from the interface's sequence table. */
+       spin_lock_irqsave(&intf->seq_lock, flags);
+       list_del_rcu(&user->link);
+
        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
-               if (user->intf->seq_table[i].inuse
-                   && (user->intf->seq_table[i].recv_msg->user == user))
+               if (intf->seq_table[i].inuse
+                   && (intf->seq_table[i].recv_msg->user == user))
                {
-                       user->intf->seq_table[i].inuse = 0;
+                       intf->seq_table[i].inuse = 0;
                }
        }
-       spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
+       spin_unlock_irqrestore(&intf->seq_lock, flags);

-       /* Remove the user from the command receiver's table. */
-       write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
-       list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) {
+       /*
+        * Remove the user from the command receiver's table.  First
+        * we build a list of everything (not using the standard link,
+        * since other things may be using it till we do
+        * synchronize_rcu()) then free everything in that list.
+        */
+       spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
+       list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
+               rcvr = list_entry(entry1, struct cmd_rcvr, link);
                if (rcvr->user == user) {
-                       list_del(&rcvr->link);
-                       kfree(rcvr);
+                       list_del_rcu(&rcvr->link);
+                       rcvr->next = rcvrs;
+                       rcvrs = rcvr;
                }
        }
-       write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
-
-       kfree(user);
-
- out_unlock:
-       return rv;
-}
-
-int ipmi_destroy_user(ipmi_user_t user)
-{
-       int           rv;
-       ipmi_smi_t    intf = user->intf;
-       unsigned long flags;
+       spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
+       synchronize_rcu();
+       while (rcvrs) {
+               rcvr = rcvrs;
+               rcvrs = rcvr->next;
+               kfree(rcvr);
+       }

-       down_read(&interfaces_sem);
-       write_lock_irqsave(&intf->users_lock, flags);
-       rv = ipmi_destroy_user_nolock(user);
-       if (!rv) {
-               module_put(intf->handlers->owner);
-               if (intf->handlers->dec_usecount)
-                       intf->handlers->dec_usecount(intf->send_info);
-       }
-       write_unlock_irqrestore(&intf->users_lock, flags);
-       up_read(&interfaces_sem);
+       module_put(intf->handlers->owner);
+       if (intf->handlers->dec_usecount)
+               intf->handlers->dec_usecount(intf->send_info);
+
+       kref_put(&intf->refcount, intf_free);
+
+       kref_put(&user->refcount, free_user);

        return rv;
 }
@@ -824,61 +891,77 @@ int ipmi_get_my_LUN(ipmi_user_t user,
 int ipmi_set_gets_events(ipmi_user_t user, int val)
 {
        unsigned long        flags;
+       ipmi_smi_t           intf = user->intf;
        struct ipmi_recv_msg *msg, *msg2;
+       struct list_head     msgs;

-       read_lock(&(user->intf->users_lock));
-       spin_lock_irqsave(&(user->intf->events_lock), flags);
+       INIT_LIST_HEAD(&msgs);
+
+       spin_lock_irqsave(&intf->events_lock, flags);
        user->gets_events = val;

        if (val) {
                /* Deliver any queued events. */
-               list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) {
+               list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
                        list_del(&msg->link);
-                       msg->user = user;
-                       deliver_response(msg);
+                       list_add_tail(&msg->link, &msgs);
                }
        }

-       spin_unlock_irqrestore(&(user->intf->events_lock), flags);
-       read_unlock(&(user->intf->users_lock));
+       /* Hold the events lock while doing this to preserve order. */
+       list_for_each_entry_safe(msg, msg2, &msgs, link) {
+               msg->user = user;
+               kref_get(&user->refcount);
+               deliver_response(msg);
+       }
+
+       spin_unlock_irqrestore(&intf->events_lock, flags);

        return 0;
 }

+static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
+                                      unsigned char netfn,
+                                      unsigned char cmd)
+{
+       struct cmd_rcvr *rcvr;
+
+       list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+               if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
+                       return rcvr;
+       }
+       return NULL;
+}
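find_cmd_rcvr() itself takes no locks, so every caller supplies its own
protection: writers hold cmd_rcvrs_lock, and the receive paths either hold
that lock or an RCU read-side section while they look up the receiver and pin
its user.  A condensed caller-side sketch of the lookup the patch uses on the
receive path (assuming intf, netfn, cmd and flags are in scope):

ipmi_user_t user;
struct cmd_rcvr *rcvr;

spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
rcvr = find_cmd_rcvr(intf, netfn, cmd);
if (rcvr) {
        user = rcvr->user;
        kref_get(&user->refcount); /* keep the user alive after unlock */
} else
        user = NULL;
spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
/* ... deliver to user, then kref_put(&user->refcount, free_user); */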
 int ipmi_register_for_cmd(ipmi_user_t   user,
                           unsigned char netfn,
                           unsigned char cmd)
 {
-       struct cmd_rcvr *cmp;
-       unsigned long   flags;
+       ipmi_smi_t      intf = user->intf;
        struct cmd_rcvr *rcvr;
+       struct cmd_rcvr *entry;
        int             rv = 0;

        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
        if (! rcvr)
                return -ENOMEM;
-
-       read_lock(&(user->intf->users_lock));
-       write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
-
-       /* Make sure the command/netfn is not already registered. */
-       list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
-               if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
-                       rv = -EBUSY;
-                       break;
-               }
-       }
-
-       if (! rv) {
-               rcvr->cmd = cmd;
-               rcvr->netfn = netfn;
-               rcvr->user = user;
-               list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
+       rcvr->cmd = cmd;
+       rcvr->netfn = netfn;
+       rcvr->user = user;
+
+       spin_lock_irq(&intf->cmd_rcvrs_lock);
+       /* Make sure the command/netfn is not already registered. */
+       entry = find_cmd_rcvr(intf, netfn, cmd);
+       if (entry) {
+               rv = -EBUSY;
+               goto out_unlock;
        }
-       write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
-       read_unlock(&(user->intf->users_lock));

+       list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+ out_unlock:
+       spin_unlock_irq(&intf->cmd_rcvrs_lock);
        if (rv)
                kfree(rcvr);
@@ -889,31 +972,28 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
                            unsigned char netfn,
                            unsigned char cmd)
 {
-       unsigned long   flags;
+       ipmi_smi_t      intf = user->intf;
        struct cmd_rcvr *rcvr;
-       int             rv = -ENOENT;

-       read_lock(&(user->intf->users_lock));
-       write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
-
+       spin_lock_irq(&intf->cmd_rcvrs_lock);
        /* Make sure the command/netfn is not already registered. */
-       list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) {
-               if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
-                       rv = 0;
-                       list_del(&rcvr->link);
-                       kfree(rcvr);
-                       break;
-               }
+       rcvr = find_cmd_rcvr(intf, netfn, cmd);
+       if ((rcvr) && (rcvr->user == user)) {
+               list_del_rcu(&rcvr->link);
+               spin_unlock_irq(&intf->cmd_rcvrs_lock);
+               synchronize_rcu();
+               kfree(rcvr);
+               return 0;
+       } else {
+               spin_unlock_irq(&intf->cmd_rcvrs_lock);
+               return -ENOENT;
        }
-       write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
-       read_unlock(&(user->intf->users_lock));
-
-       return rv;
 }

 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
-       user->intf->handlers->set_run_to_completion(user->intf->send_info,
-                                                   val);
+       ipmi_smi_t intf = user->intf;
+       intf->handlers->set_run_to_completion(intf->send_info, val);
 }
static unsigned char static unsigned char
...@@ -1010,7 +1090,7 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, ...@@ -1010,7 +1090,7 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
supplied in certain circumstances (mainly at panic time). If supplied in certain circumstances (mainly at panic time). If
messages are supplied, they will be freed, even if an error messages are supplied, they will be freed, even if an error
occurs. */ occurs. */
static inline int i_ipmi_request(ipmi_user_t user, static int i_ipmi_request(ipmi_user_t user,
ipmi_smi_t intf, ipmi_smi_t intf,
struct ipmi_addr *addr, struct ipmi_addr *addr,
long msgid, long msgid,
...@@ -1051,6 +1131,8 @@ static inline int i_ipmi_request(ipmi_user_t user, ...@@ -1051,6 +1131,8 @@ static inline int i_ipmi_request(ipmi_user_t user,
} }
recv_msg->user = user; recv_msg->user = user;
if (user)
kref_get(&user->refcount);
recv_msg->msgid = msgid; recv_msg->msgid = msgid;
/* Store the message to send in the receive message so timeout /* Store the message to send in the receive message so timeout
responses can get the proper response data. */ responses can get the proper response data. */
...@@ -1725,11 +1807,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, ...@@ -1725,11 +1807,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
unsigned char version_major, unsigned char version_major,
unsigned char version_minor, unsigned char version_minor,
unsigned char slave_addr, unsigned char slave_addr,
ipmi_smi_t *intf) ipmi_smi_t *new_intf)
{ {
int i, j; int i, j;
int rv; int rv;
ipmi_smi_t new_intf; ipmi_smi_t intf;
unsigned long flags; unsigned long flags;
...@@ -1745,189 +1827,142 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, ...@@ -1745,189 +1827,142 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
return -ENODEV; return -ENODEV;
} }
new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL); intf = kmalloc(sizeof(*intf), GFP_KERNEL);
if (!new_intf) if (!intf)
return -ENOMEM; return -ENOMEM;
memset(new_intf, 0, sizeof(*new_intf)); memset(intf, 0, sizeof(*intf));
intf->intf_num = -1;
new_intf->proc_dir = NULL; kref_init(&intf->refcount);
intf->version_major = version_major;
rv = -ENOMEM; intf->version_minor = version_minor;
down_write(&interfaces_sem);
for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
if (ipmi_interfaces[i] == NULL) {
new_intf->intf_num = i;
new_intf->version_major = version_major;
new_intf->version_minor = version_minor;
for (j = 0; j < IPMI_MAX_CHANNELS; j++) { for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
new_intf->channels[j].address intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
= IPMI_BMC_SLAVE_ADDR; intf->channels[j].lun = 2;
new_intf->channels[j].lun = 2;
} }
if (slave_addr != 0) if (slave_addr != 0)
new_intf->channels[0].address = slave_addr; intf->channels[0].address = slave_addr;
rwlock_init(&(new_intf->users_lock)); INIT_LIST_HEAD(&intf->users);
INIT_LIST_HEAD(&(new_intf->users)); intf->handlers = handlers;
new_intf->handlers = handlers; intf->send_info = send_info;
new_intf->send_info = send_info; spin_lock_init(&intf->seq_lock);
spin_lock_init(&(new_intf->seq_lock));
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
new_intf->seq_table[j].inuse = 0; intf->seq_table[j].inuse = 0;
new_intf->seq_table[j].seqid = 0; intf->seq_table[j].seqid = 0;
} }
new_intf->curr_seq = 0; intf->curr_seq = 0;
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
spin_lock_init(&(new_intf->proc_entry_lock)); spin_lock_init(&intf->proc_entry_lock);
#endif #endif
spin_lock_init(&(new_intf->waiting_msgs_lock)); spin_lock_init(&intf->waiting_msgs_lock);
INIT_LIST_HEAD(&(new_intf->waiting_msgs)); INIT_LIST_HEAD(&intf->waiting_msgs);
spin_lock_init(&(new_intf->events_lock)); spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&(new_intf->waiting_events)); INIT_LIST_HEAD(&intf->waiting_events);
new_intf->waiting_events_count = 0; intf->waiting_events_count = 0;
rwlock_init(&(new_intf->cmd_rcvr_lock)); spin_lock_init(&intf->cmd_rcvrs_lock);
init_waitqueue_head(&new_intf->waitq); INIT_LIST_HEAD(&intf->cmd_rcvrs);
INIT_LIST_HEAD(&(new_intf->cmd_rcvrs)); init_waitqueue_head(&intf->waitq);
spin_lock_init(&(new_intf->counter_lock)); spin_lock_init(&intf->counter_lock);
intf->proc_dir = NULL;
rv = -ENOMEM;
spin_lock_irqsave(&interfaces_lock, flags); spin_lock_irqsave(&interfaces_lock, flags);
ipmi_interfaces[i] = new_intf; for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
spin_unlock_irqrestore(&interfaces_lock, flags); if (ipmi_interfaces[i] == NULL) {
intf->intf_num = i;
/* Reserve the entry till we are done. */
ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
rv = 0; rv = 0;
*intf = new_intf;
break; break;
} }
} }
spin_unlock_irqrestore(&interfaces_lock, flags);
if (rv)
goto out;
downgrade_write(&interfaces_sem); /* FIXME - this is an ugly kludge, this sets the intf for the
caller before sending any messages with it. */
if (rv == 0) *new_intf = intf;
rv = add_proc_entries(*intf, i);
if (rv == 0) {
if ((version_major > 1) if ((version_major > 1)
|| ((version_major == 1) && (version_minor >= 5))) || ((version_major == 1) && (version_minor >= 5)))
{ {
/* Start scanning the channels to see what is /* Start scanning the channels to see what is
available. */ available. */
(*intf)->null_user_handler = channel_handler; intf->null_user_handler = channel_handler;
(*intf)->curr_channel = 0; intf->curr_channel = 0;
rv = send_channel_info_cmd(*intf, 0); rv = send_channel_info_cmd(intf, 0);
if (rv) if (rv)
goto out; goto out;
/* Wait for the channel info to be read. */ /* Wait for the channel info to be read. */
up_read(&interfaces_sem); wait_event(intf->waitq,
wait_event((*intf)->waitq, intf->curr_channel >= IPMI_MAX_CHANNELS);
((*intf)->curr_channel>=IPMI_MAX_CHANNELS));
down_read(&interfaces_sem);
if (ipmi_interfaces[i] != new_intf)
/* Well, it went away. Just return. */
goto out;
} else { } else {
/* Assume a single IPMB channel at zero. */ /* Assume a single IPMB channel at zero. */
(*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
(*intf)->channels[0].protocol intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
= IPMI_CHANNEL_PROTOCOL_IPMB;
} }
/* Call all the watcher interfaces to tell if (rv == 0)
them that a new interface is available. */ rv = add_proc_entries(intf, i);
call_smi_watchers(i);
}
out: out:
up_read(&interfaces_sem);
if (rv) { if (rv) {
if (new_intf->proc_dir) if (intf->proc_dir)
remove_proc_entries(new_intf); remove_proc_entries(intf);
kfree(new_intf); kref_put(&intf->refcount, intf_free);
} if (i < MAX_IPMI_INTERFACES) {
spin_lock_irqsave(&interfaces_lock, flags);
return rv; ipmi_interfaces[i] = NULL;
} spin_unlock_irqrestore(&interfaces_lock, flags);
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
list_for_each_entry_safe(msg, msg2, q, link) {
list_del(&msg->link);
ipmi_free_recv_msg(msg);
} }
} } else {
spin_lock_irqsave(&interfaces_lock, flags);
static void free_cmd_rcvr_list(struct list_head *q) ipmi_interfaces[i] = intf;
{ spin_unlock_irqrestore(&interfaces_lock, flags);
struct cmd_rcvr *rcvr, *rcvr2; call_smi_watchers(i);
list_for_each_entry_safe(rcvr, rcvr2, q, link) {
list_del(&rcvr->link);
kfree(rcvr);
} }
}
static void clean_up_interface_data(ipmi_smi_t intf)
{
int i;
free_recv_msg_list(&(intf->waiting_msgs));
free_recv_msg_list(&(intf->waiting_events));
free_cmd_rcvr_list(&(intf->cmd_rcvrs));
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { return rv;
if ((intf->seq_table[i].inuse)
&& (intf->seq_table[i].recv_msg))
{
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
} }
int ipmi_unregister_smi(ipmi_smi_t intf) int ipmi_unregister_smi(ipmi_smi_t intf)
{ {
int rv = -ENODEV;
int i; int i;
struct ipmi_smi_watcher *w; struct ipmi_smi_watcher *w;
unsigned long flags; unsigned long flags;
down_write(&interfaces_sem); spin_lock_irqsave(&interfaces_lock, flags);
if (list_empty(&(intf->users)))
{
for (i = 0; i < MAX_IPMI_INTERFACES; i++) { for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
if (ipmi_interfaces[i] == intf) { if (ipmi_interfaces[i] == intf) {
remove_proc_entries(intf); /* Set the interface number reserved until we
spin_lock_irqsave(&interfaces_lock, flags); * are done. */
ipmi_interfaces[i] = NULL; ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
clean_up_interface_data(intf); intf->intf_num = -1;
spin_unlock_irqrestore(&interfaces_lock,flags); break;
kfree(intf);
rv = 0;
goto out_call_watcher;
}
} }
} else {
rv = -EBUSY;
} }
up_write(&interfaces_sem); spin_unlock_irqrestore(&interfaces_lock,flags);
return rv; if (i == MAX_IPMI_INTERFACES)
return -ENODEV;
out_call_watcher: remove_proc_entries(intf);
downgrade_write(&interfaces_sem);
/* Call all the watcher interfaces to tell them that /* Call all the watcher interfaces to tell them that
an interface is gone. */ an interface is gone. */
down_read(&smi_watchers_sem); down_read(&smi_watchers_sem);
list_for_each_entry(w, &smi_watchers, link) { list_for_each_entry(w, &smi_watchers, link)
w->smi_gone(i); w->smi_gone(i);
}
up_read(&smi_watchers_sem); up_read(&smi_watchers_sem);
up_read(&interfaces_sem);
/* Allow the entry to be reused now. */
spin_lock_irqsave(&interfaces_lock, flags);
ipmi_interfaces[i] = NULL;
spin_unlock_irqrestore(&interfaces_lock,flags);
kref_put(&intf->refcount, intf_free);
return 0; return 0;
} }
...@@ -2023,16 +2058,14 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, ...@@ -2023,16 +2058,14 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
netfn = msg->rsp[4] >> 2; netfn = msg->rsp[4] >> 2;
cmd = msg->rsp[8]; cmd = msg->rsp[8];
read_lock(&(intf->cmd_rcvr_lock)); spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
rcvr = find_cmd_rcvr(intf, netfn, cmd);
/* Find the command/netfn. */ if (rcvr) {
list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
user = rcvr->user; user = rcvr->user;
break; kref_get(&user->refcount);
} } else
} user = NULL;
read_unlock(&(intf->cmd_rcvr_lock)); spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
if (user == NULL) { if (user == NULL) {
/* We didn't find a user, deliver an error response. */ /* We didn't find a user, deliver an error response. */
...@@ -2079,6 +2112,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, ...@@ -2079,6 +2112,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
message, so requeue it for handling message, so requeue it for handling
later. */ later. */
rv = 1; rv = 1;
kref_put(&user->refcount, free_user);
} else { } else {
/* Extract the source address from the data. */ /* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
...@@ -2204,19 +2238,17 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, ...@@ -2204,19 +2238,17 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
netfn = msg->rsp[6] >> 2; netfn = msg->rsp[6] >> 2;
cmd = msg->rsp[10]; cmd = msg->rsp[10];
read_lock(&(intf->cmd_rcvr_lock)); spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
rcvr = find_cmd_rcvr(intf, netfn, cmd);
/* Find the command/netfn. */ if (rcvr) {
list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
user = rcvr->user; user = rcvr->user;
break; kref_get(&user->refcount);
} } else
} user = NULL;
read_unlock(&(intf->cmd_rcvr_lock)); spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
if (user == NULL) { if (user == NULL) {
/* We didn't find a user, deliver an error response. */ /* We didn't find a user, just give up. */
spin_lock_irqsave(&intf->counter_lock, flags); spin_lock_irqsave(&intf->counter_lock, flags);
intf->unhandled_commands++; intf->unhandled_commands++;
spin_unlock_irqrestore(&intf->counter_lock, flags); spin_unlock_irqrestore(&intf->counter_lock, flags);
...@@ -2235,6 +2267,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, ...@@ -2235,6 +2267,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
message, so requeue it for handling message, so requeue it for handling
later. */ later. */
rv = 1; rv = 1;
kref_put(&user->refcount, free_user);
} else { } else {
/* Extract the source address from the data. */ /* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
...@@ -2286,8 +2319,6 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, ...@@ -2286,8 +2319,6 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
recv_msg->msg.data_len = msg->rsp_size - 3; recv_msg->msg.data_len = msg->rsp_size - 3;
} }
/* This will be called with the intf->users_lock read-locked, so no need
to do that here. */
static int handle_read_event_rsp(ipmi_smi_t intf, static int handle_read_event_rsp(ipmi_smi_t intf,
struct ipmi_smi_msg *msg) struct ipmi_smi_msg *msg)
{ {
...@@ -2313,7 +2344,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, ...@@ -2313,7 +2344,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
INIT_LIST_HEAD(&msgs); INIT_LIST_HEAD(&msgs);
spin_lock_irqsave(&(intf->events_lock), flags); spin_lock_irqsave(&intf->events_lock, flags);
spin_lock(&intf->counter_lock); spin_lock(&intf->counter_lock);
intf->events++; intf->events++;
...@@ -2321,12 +2352,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf, ...@@ -2321,12 +2352,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
/* Allocate and fill in one message for every user that is getting /* Allocate and fill in one message for every user that is getting
events. */ events. */
list_for_each_entry(user, &(intf->users), link) { rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
if (! user->gets_events) if (! user->gets_events)
continue; continue;
recv_msg = ipmi_alloc_recv_msg(); recv_msg = ipmi_alloc_recv_msg();
if (! recv_msg) { if (! recv_msg) {
rcu_read_unlock();
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
list_del(&recv_msg->link); list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg); ipmi_free_recv_msg(recv_msg);
...@@ -2342,8 +2375,10 @@ static int handle_read_event_rsp(ipmi_smi_t intf, ...@@ -2342,8 +2375,10 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
copy_event_into_recv_msg(recv_msg, msg); copy_event_into_recv_msg(recv_msg, msg);
recv_msg->user = user; recv_msg->user = user;
kref_get(&user->refcount);
list_add_tail(&(recv_msg->link), &msgs); list_add_tail(&(recv_msg->link), &msgs);
} }
rcu_read_unlock();
if (deliver_count) { if (deliver_count) {
/* Now deliver all the messages. */ /* Now deliver all the messages. */
...@@ -2382,9 +2417,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf, ...@@ -2382,9 +2417,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
struct ipmi_smi_msg *msg) struct ipmi_smi_msg *msg)
{ {
struct ipmi_recv_msg *recv_msg; struct ipmi_recv_msg *recv_msg;
int found = 0;
struct ipmi_user *user;
unsigned long flags; unsigned long flags;
struct ipmi_user *user;
recv_msg = (struct ipmi_recv_msg *) msg->user_data; recv_msg = (struct ipmi_recv_msg *) msg->user_data;
if (recv_msg == NULL) if (recv_msg == NULL)
...@@ -2396,16 +2430,9 @@ static int handle_bmc_rsp(ipmi_smi_t intf, ...@@ -2396,16 +2430,9 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
return 0; return 0;
} }
user = recv_msg->user;
/* Make sure the user still exists. */ /* Make sure the user still exists. */
list_for_each_entry(user, &(intf->users), link) { if (user && !user->valid) {
if (user == recv_msg->user) {
/* Found it, so we can deliver it */
found = 1;
break;
}
}
if ((! found) && recv_msg->user) {
/* The user for the message went away, so give up. */ /* The user for the message went away, so give up. */
spin_lock_irqsave(&intf->counter_lock, flags); spin_lock_irqsave(&intf->counter_lock, flags);
intf->unhandled_local_responses++; intf->unhandled_local_responses++;
...@@ -2498,13 +2525,18 @@ static int handle_new_recv_msg(ipmi_smi_t intf, ...@@ -2498,13 +2525,18 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
/* Invalid channel number */ /* Invalid channel number */
goto out; goto out;
if (recv_msg) { if (!recv_msg)
goto out;
/* Make sure the user still exists. */
if (!recv_msg->user || !recv_msg->user->valid)
goto out;
recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = 1; recv_msg->msg.data_len = 1;
recv_msg->msg_data[0] = msg->rsp[2]; recv_msg->msg_data[0] = msg->rsp[2];
deliver_response(recv_msg); deliver_response(recv_msg);
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) && (msg->rsp[1] == IPMI_GET_MSG_CMD))
{ {
...@@ -2570,14 +2602,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, ...@@ -2570,14 +2602,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
int rv; int rv;
/* Lock the user lock so the user can't go away while we are
working on it. */
read_lock(&(intf->users_lock));
if ((msg->data_size >= 2) if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) { && (msg->user_data == NULL))
{
/* This is the local response to a command send, start /* This is the local response to a command send, start
the timer for these. The user_data will not be the timer for these. The user_data will not be
NULL if this is a response send, and we will let NULL if this is a response send, and we will let
...@@ -2612,46 +2641,46 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, ...@@ -2612,46 +2641,46 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
} }
ipmi_free_smi_msg(msg); ipmi_free_smi_msg(msg);
goto out_unlock; goto out;
} }
/* To preserve message order, if the list is not empty, we /* To preserve message order, if the list is not empty, we
tack this message onto the end of the list. */ tack this message onto the end of the list. */
spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
if (!list_empty(&(intf->waiting_msgs))) { if (!list_empty(&intf->waiting_msgs)) {
list_add_tail(&(msg->link), &(intf->waiting_msgs)); list_add_tail(&msg->link, &intf->waiting_msgs);
spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); spin_unlock(&intf->waiting_msgs_lock);
goto out_unlock; goto out;
} }
spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
rv = handle_new_recv_msg(intf, msg); rv = handle_new_recv_msg(intf, msg);
if (rv > 0) { if (rv > 0) {
/* Could not handle the message now, just add it to a /* Could not handle the message now, just add it to a
list to handle later. */ list to handle later. */
spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); spin_lock(&intf->waiting_msgs_lock);
list_add_tail(&(msg->link), &(intf->waiting_msgs)); list_add_tail(&msg->link, &intf->waiting_msgs);
spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); spin_unlock(&intf->waiting_msgs_lock);
} else if (rv == 0) { } else if (rv == 0) {
ipmi_free_smi_msg(msg); ipmi_free_smi_msg(msg);
} }
out_unlock: out:
read_unlock(&(intf->users_lock)); return;
} }
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{ {
ipmi_user_t user; ipmi_user_t user;
read_lock(&(intf->users_lock)); rcu_read_lock();
list_for_each_entry(user, &(intf->users), link) { list_for_each_entry_rcu(user, &intf->users, link) {
if (! user->handler->ipmi_watchdog_pretimeout) if (! user->handler->ipmi_watchdog_pretimeout)
continue; continue;
user->handler->ipmi_watchdog_pretimeout(user->handler_data); user->handler->ipmi_watchdog_pretimeout(user->handler_data);
} }
read_unlock(&(intf->users_lock)); rcu_read_unlock();
} }
static void static void
...@@ -2691,63 +2720,28 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, ...@@ -2691,63 +2720,28 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
return smi_msg; return smi_msg;
} }
static void static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
ipmi_timeout_handler(long timeout_period) struct list_head *timeouts, long timeout_period,
int slot, unsigned long *flags)
{ {
ipmi_smi_t intf; struct ipmi_recv_msg *msg;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i, j;
INIT_LIST_HEAD(&timeouts);
spin_lock(&interfaces_lock);
for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
intf = ipmi_interfaces[i];
if (intf == NULL)
continue;
read_lock(&(intf->users_lock));
/* See if any waiting messages need to be processed. */
spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) {
if (! handle_new_recv_msg(intf, smi_msg)) {
list_del(&smi_msg->link);
ipmi_free_smi_msg(smi_msg);
} else {
/* To preserve message order, quit if we
can't handle a message. */
break;
}
}
spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
/* Go through the seq table and find any messages that
have timed out, putting them in the timeouts
list. */
spin_lock_irqsave(&(intf->seq_lock), flags);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
struct seq_table *ent = &(intf->seq_table[j]);
if (!ent->inuse) if (!ent->inuse)
continue; return;
ent->timeout -= timeout_period; ent->timeout -= timeout_period;
if (ent->timeout > 0) if (ent->timeout > 0)
continue; return;
if (ent->retries_left == 0) { if (ent->retries_left == 0) {
/* The message has used all its retries. */ /* The message has used all its retries. */
ent->inuse = 0; ent->inuse = 0;
msg = ent->recv_msg; msg = ent->recv_msg;
list_add_tail(&(msg->link), &timeouts); list_add_tail(&msg->link, timeouts);
spin_lock(&intf->counter_lock); spin_lock(&intf->counter_lock);
if (ent->broadcast) if (ent->broadcast)
intf->timed_out_ipmb_broadcasts++; intf->timed_out_ipmb_broadcasts++;
else if (ent->recv_msg->addr.addr_type else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
== IPMI_LAN_ADDR_TYPE)
intf->timed_out_lan_commands++; intf->timed_out_lan_commands++;
else else
intf->timed_out_ipmb_commands++; intf->timed_out_ipmb_commands++;
...@@ -2761,18 +2755,18 @@ ipmi_timeout_handler(long timeout_period) ...@@ -2761,18 +2755,18 @@ ipmi_timeout_handler(long timeout_period)
ent->timeout = MAX_MSG_TIMEOUT; ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--; ent->retries_left--;
spin_lock(&intf->counter_lock); spin_lock(&intf->counter_lock);
if (ent->recv_msg->addr.addr_type if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
== IPMI_LAN_ADDR_TYPE)
intf->retransmitted_lan_commands++; intf->retransmitted_lan_commands++;
else else
intf->retransmitted_ipmb_commands++; intf->retransmitted_ipmb_commands++;
spin_unlock(&intf->counter_lock); spin_unlock(&intf->counter_lock);
smi_msg = smi_from_recv_msg(intf,
ent->recv_msg, j, ent->seqid); smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
if (! smi_msg) if (! smi_msg)
continue; return;
spin_unlock_irqrestore(&(intf->seq_lock),flags); spin_unlock_irqrestore(&intf->seq_lock, *flags);
/* Send the new message. We send with a zero /* Send the new message. We send with a zero
* priority. It timed out, I doubt time is * priority. It timed out, I doubt time is
* that critical now, and high priority * that critical now, and high priority
...@@ -2780,16 +2774,58 @@ ipmi_timeout_handler(long timeout_period) ...@@ -2780,16 +2774,58 @@ ipmi_timeout_handler(long timeout_period)
* local MC, which don't get resent. */ * local MC, which don't get resent. */
intf->handlers->sender(intf->send_info, intf->handlers->sender(intf->send_info,
smi_msg, 0); smi_msg, 0);
spin_lock_irqsave(&(intf->seq_lock), flags); spin_lock_irqsave(&intf->seq_lock, *flags);
} }
}
static void ipmi_timeout_handler(long timeout_period)
{
ipmi_smi_t intf;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i, j;
INIT_LIST_HEAD(&timeouts);
spin_lock(&interfaces_lock);
for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
intf = ipmi_interfaces[i];
if (IPMI_INVALID_INTERFACE(intf))
continue;
kref_get(&intf->refcount);
spin_unlock(&interfaces_lock);
/* See if any waiting messages need to be processed. */
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
if (! handle_new_recv_msg(intf, smi_msg)) {
list_del(&smi_msg->link);
ipmi_free_smi_msg(smi_msg);
} else {
/* To preserve message order, quit if we
can't handle a message. */
break;
} }
spin_unlock_irqrestore(&(intf->seq_lock), flags); }
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
list_for_each_entry_safe(msg, msg2, &timeouts, link) { /* Go through the seq table and find any messages that
have timed out, putting them in the timeouts
list. */
spin_lock_irqsave(&intf->seq_lock, flags);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
check_msg_timeout(intf, &(intf->seq_table[j]),
&timeouts, timeout_period, j,
&flags);
spin_unlock_irqrestore(&intf->seq_lock, flags);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
handle_msg_timeout(msg); handle_msg_timeout(msg);
}
read_unlock(&(intf->users_lock)); kref_put(&intf->refcount, intf_free);
spin_lock(&interfaces_lock);
} }
spin_unlock(&interfaces_lock); spin_unlock(&interfaces_lock);
} }
...@@ -2802,7 +2838,7 @@ static void ipmi_request_event(void) ...@@ -2802,7 +2838,7 @@ static void ipmi_request_event(void)
spin_lock(&interfaces_lock); spin_lock(&interfaces_lock);
for (i = 0; i < MAX_IPMI_INTERFACES; i++) { for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
intf = ipmi_interfaces[i]; intf = ipmi_interfaces[i];
if (intf == NULL) if (IPMI_INVALID_INTERFACE(intf))
continue; continue;
intf->handlers->request_events(intf->send_info); intf->handlers->request_events(intf->send_info);
@@ -2884,6 +2920,13 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
        return rv;
 }

+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+       if (msg->user)
+               kref_put(&msg->user->refcount, free_user);
+       msg->done(msg);
+}
+
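The matching kref_get() lives wherever a message is bound to a user, so the
put in ipmi_free_recv_msg() is always balanced; for example, the request and
event paths in this patch do roughly:

recv_msg->user = user;
if (user)
        kref_get(&user->refcount); /* dropped again in ipmi_free_recv_msg() */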
 #ifdef CONFIG_IPMI_PANIC_EVENT
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
...@@ -2964,7 +3007,7 @@ static void send_panic_events(char *str) ...@@ -2964,7 +3007,7 @@ static void send_panic_events(char *str)
/* For every registered interface, send the event. */ /* For every registered interface, send the event. */
for (i = 0; i < MAX_IPMI_INTERFACES; i++) { for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
intf = ipmi_interfaces[i]; intf = ipmi_interfaces[i];
if (intf == NULL) if (IPMI_INVALID_INTERFACE(intf))
continue; continue;
/* Send the event announcing the panic. */ /* Send the event announcing the panic. */
...@@ -2995,7 +3038,7 @@ static void send_panic_events(char *str) ...@@ -2995,7 +3038,7 @@ static void send_panic_events(char *str)
int j; int j;
intf = ipmi_interfaces[i]; intf = ipmi_interfaces[i];
if (intf == NULL) if (IPMI_INVALID_INTERFACE(intf))
continue; continue;
/* First job here is to figure out where to send the /* First job here is to figure out where to send the
...@@ -3131,7 +3174,7 @@ static int panic_event(struct notifier_block *this, ...@@ -3131,7 +3174,7 @@ static int panic_event(struct notifier_block *this,
/* For every registered interface, set it to run to completion. */ /* For every registered interface, set it to run to completion. */
for (i = 0; i < MAX_IPMI_INTERFACES; i++) { for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
intf = ipmi_interfaces[i]; intf = ipmi_interfaces[i];
if (intf == NULL) if (IPMI_INVALID_INTERFACE(intf))
continue; continue;
intf->handlers->set_run_to_completion(intf->send_info, 1); intf->handlers->set_run_to_completion(intf->send_info, 1);
...@@ -3160,9 +3203,8 @@ static int ipmi_init_msghandler(void) ...@@ -3160,9 +3203,8 @@ static int ipmi_init_msghandler(void)
printk(KERN_INFO "ipmi message handler version " printk(KERN_INFO "ipmi message handler version "
IPMI_DRIVER_VERSION "\n"); IPMI_DRIVER_VERSION "\n");
for (i = 0; i < MAX_IPMI_INTERFACES; i++) { for (i = 0; i < MAX_IPMI_INTERFACES; i++)
ipmi_interfaces[i] = NULL; ipmi_interfaces[i] = NULL;
}
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
proc_ipmi_root = proc_mkdir("ipmi", NULL); proc_ipmi_root = proc_mkdir("ipmi", NULL);
@@ -3258,3 +3300,4 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
 EXPORT_SYMBOL(proc_ipmi_root);
 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
+EXPORT_SYMBOL(ipmi_free_recv_msg);
@@ -256,10 +256,7 @@ struct ipmi_recv_msg
 };

 /* Allocate and free the receive message. */
-static inline void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
-{
-       msg->done(msg);
-}
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);

 struct ipmi_user_hndl
 {