Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c     |   4
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 242
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c    |  72
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c   |  21
4 files changed, 171 insertions, 168 deletions
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae09..e53fc24c6af 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 1000000
-#define OBF_RETRY_TIMEOUT 1000000
+#define IBF_RETRY_TIMEOUT 5000000
+#define OBF_RETRY_TIMEOUT 5000000
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c90e9390b78..2c29942b132 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
#define PFX "IPMI message handler: "
@@ -52,6 +53,8 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
static int initialized;
@@ -354,12 +357,15 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages that were delayed for some reason (out of memory,
- * for instance), will go in here to be processed later in a
- * periodic timer interrupt.
+ * Messages queued for delivery. If delivery fails (out of memory,
+ * for instance), they will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet handles messages received
+ * directly from the handler.
*/
spinlock_t waiting_msgs_lock;
struct list_head waiting_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
/*
* The list of command receivers that are registered for commands
@@ -492,6 +498,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
+ tasklet_kill(&intf->recv_tasklet);
+
free_smi_msg_list(&intf->waiting_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -2785,12 +2793,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
-void ipmi_poll_interface(ipmi_user_t user)
+static void ipmi_poll(ipmi_smi_t intf)
{
- ipmi_smi_t intf = user->intf;
-
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+ ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
@@ -2859,6 +2872,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#endif
spin_lock_init(&intf->waiting_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_msgs);
+ tasklet_init(&intf->recv_tasklet,
+ smi_recv_tasklet,
+ (unsigned long) intf);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
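
Taken together, the hunks above set up a simple queue-and-defer scheme: producers append to waiting_msgs (or bump the pretimeout counter), schedule recv_tasklet, and all actual delivery happens in the tasklet. A minimal sketch of that shape, using the same kernel tasklet API; everything except tasklet_init/tasklet_schedule/tasklet_kill is illustrative and not code from this patch:

/* Sketch of the queue-and-defer pattern; names other than the tasklet
 * and list/spinlock APIs are illustrative. */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_intf {
	spinlock_t lock;
	struct list_head queue;
	struct tasklet_struct tasklet;
};

static void demo_tasklet_fn(unsigned long data)
{
	struct demo_intf *di = (struct demo_intf *) data;
	/* Drain di->queue here, taking di->lock around list manipulation. */
}

static void demo_init(struct demo_intf *di)
{
	spin_lock_init(&di->lock);
	INIT_LIST_HEAD(&di->queue);
	tasklet_init(&di->tasklet, demo_tasklet_fn, (unsigned long) di);
}

/* Producer side (may run in interrupt context): queue the item and kick
 * the tasklet; no processing is done here. */
static void demo_enqueue(struct demo_intf *di, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&di->lock, flags);
	list_add_tail(item, &di->queue);
	spin_unlock_irqrestore(&di->lock, flags);
	tasklet_schedule(&di->tasklet);
}

/* Teardown must kill the tasklet, matching the tasklet_kill() added to
 * clean_up_interface_data() above. */
static void demo_exit(struct demo_intf *di)
{
	tasklet_kill(&di->tasklet);
}
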
@@ -3621,11 +3638,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
}
/*
- * Handle a new message. Return 1 if the message should be requeued,
+ * Handle a received message. Return 1 if the message should be requeued,
* 0 if the message should be freed, or -1 if the message should not
* be freed or requeued.
*/
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
int requeue;
@@ -3783,12 +3800,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
return requeue;
}
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_msgs)) {
+ smi_msg = list_entry(intf->waiting_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ if (rv == 0) {
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ } else if (rv < 0) {
+ /* Fatal error: leave the message off the list, but don't free it. */
+ } else {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
+ list_add(&smi_msg->link, &intf->waiting_msgs);
+ break;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ /*
+ * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ ipmi_user_t user;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ rcu_read_unlock();
+ }
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+ handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int rv;
int run_to_completion;
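
The pretimeout branch in handle_new_recv_msgs() above relies on atomic_add_unless() semantics that are easy to misread: atomic_add_unless(&v, -1, 0) adds -1 only if v is not already 0 and returns non-zero only when it actually did so, so a spurious extra tasklet run delivers nothing. In sketch form (the surrounding functions are illustrative):

/* "Consume at most one pending token" idiom used for watchdog pretimeouts. */
#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);

/* Producer (interrupt context): record that one event is owed. */
static void note_event(void)
{
	atomic_set(&pending, 1);
}

/* Consumer (tasklet): decrements only when the counter is non-zero and
 * delivers exactly one event per recorded request. */
static void consume_event(void)
{
	if (atomic_add_unless(&pending, -1, 0)) {
		/* deliver exactly one pretimeout to the registered users */
	}
}
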
@@ -3842,31 +3919,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
run_to_completion = intf->run_to_completion;
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (!list_empty(&intf->waiting_msgs)) {
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- goto out;
- }
+ list_add_tail(&msg->link, &intf->waiting_msgs);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- rv = handle_new_recv_msg(intf, msg);
- if (rv > 0) {
- /*
- * Could not handle the message now, just add it to a
- * list to handle later.
- */
- run_to_completion = intf->run_to_completion;
- if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- } else if (rv == 0) {
- ipmi_free_smi_msg(msg);
- }
-
+ tasklet_schedule(&intf->recv_tasklet);
out:
return;
}
@@ -3874,16 +3931,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
- ipmi_user_t user;
-
- rcu_read_lock();
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (!user->handler->ipmi_watchdog_pretimeout)
- continue;
-
- user->handler->ipmi_watchdog_pretimeout(user->handler_data);
- }
- rcu_read_unlock();
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
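
Because ipmi_smi_watchdog_pretimeout() now only records the event and schedules the tasklet, it no longer walks the RCU user list or calls back into users, so it is safe to call with spinlocks held and from hard interrupt context; this is what lets the ipmi_si_intf.c hunk below drop the unlock/relock that handle_flags() previously needed around this call.
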
@@ -3997,28 +4046,12 @@ static void ipmi_timeout_handler(long timeout_period)
ipmi_smi_t intf;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
- struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i;
rcu_read_lock();
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* See if any waiting messages need to be processed. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2,
- &intf->waiting_msgs, link) {
- if (!handle_new_recv_msg(intf, smi_msg)) {
- list_del(&smi_msg->link);
- ipmi_free_smi_msg(smi_msg);
- } else {
- /*
- * To preserve message order, quit if we
- * can't handle a message.
- */
- break;
- }
- }
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ tasklet_schedule(&intf->recv_tasklet);
/*
* Go through the seq table and find any messages that
@@ -4172,12 +4205,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
#ifdef CONFIG_IPMI_PANIC_EVENT
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
}
#ifdef CONFIG_IPMI_PANIC_STRING
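
ipmi_panic_request_and_wait() centralizes a pattern the later hunks reuse: count the expected completions up front, back the count out if the request fails, and poll until the dummy done handlers have brought it back to zero. A reduced sketch, in which submit_request() and poll_for_completions() are illustrative stand-ins for i_ipmi_request() and ipmi_poll():

#include <linux/atomic.h>

static atomic_t done_count = ATOMIC_INIT(0);

/* Called once per completed message (the dummy "done" handlers above). */
static void one_done(void)
{
	atomic_dec(&done_count);
}

static int submit_request(void);        /* illustrative */
static void poll_for_completions(void); /* illustrative; drives one_done() */

static void request_and_wait(void)
{
	/*
	 * Two completions are expected (the SMI message and the receive
	 * message).  Account for them before submitting so a completion
	 * that fires during the submit cannot be missed by the wait loop.
	 */
	atomic_add(2, &done_count);
	if (submit_request())
		atomic_sub(2, &done_count);	/* request never went out */
	while (atomic_read(&done_count) != 0)
		poll_for_completions();
}
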
@@ -4216,8 +4285,6 @@ static void send_panic_events(char *str)
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
- struct ipmi_smi_msg smi_msg;
- struct ipmi_recv_msg recv_msg;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4245,9 +4312,6 @@ static void send_panic_events(char *str)
data[7] = str[2];
}
- smi_msg.done = dummy_smi_done_handler;
- recv_msg.done = dummy_recv_done_handler;
-
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (!intf->handlers)
@@ -4257,18 +4321,7 @@ static void send_panic_events(char *str)
intf->run_to_completion = 1;
/* Send the event announcing the panic. */
intf->handlers->set_run_to_completion(intf->send_info, 1);
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4316,18 +4369,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -4336,18 +4378,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -4404,18 +4435,7 @@ static void send_panic_events(char *str)
strncpy(data+5, p, 11);
p += size;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
#endif /* CONFIG_IPMI_PANIC_STRING */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31..1e638fff40e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
- spinlock_t msg_lock;
struct list_head xmit_msgs;
struct list_head hp_xmit_msgs;
struct ipmi_smi_msg *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
- /* Deliver the message to the upper layer with the lock
- released. */
-
- if (smi_info->run_to_completion) {
- ipmi_smi_msg_received(smi_info->intf, msg);
- } else {
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
- }
+ /* Deliver the message to the upper layer. */
+ ipmi_smi_msg_received(smi_info->intf, msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
struct timeval t;
#endif
- /*
- * No need to save flags, we aleady have interrupts off and we
- * already hold the SMI lock.
- */
- if (!smi_info->run_to_completion)
- spin_lock(&(smi_info->msg_lock));
-
/* Pick the high priority queue first. */
if (!list_empty(&(smi_info->hp_xmit_msgs))) {
entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
- if (!smi_info->run_to_completion)
- spin_unlock(&(smi_info->msg_lock));
-
return rv;
}
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
- spin_unlock(&(smi_info->si_lock));
ipmi_smi_watchdog_pretimeout(smi_info->intf);
- spin_lock(&(smi_info->si_lock));
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
- /*
- * last_timeout_jiffies is updated here to avoid
- * smi_timeout() handler passing very large time_diff
- * value to smi_event_handler() that causes
- * the send command to abort.
- */
- smi_info->last_timeout_jiffies = jiffies;
-
- mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
- if (smi_info->thread)
- wake_up_process(smi_info->thread);
-
if (smi_info->run_to_completion) {
/*
* If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void *send_info,
return;
}
- spin_lock_irqsave(&smi_info->msg_lock, flags);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
if (priority > 0)
list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
else
list_add_tail(&msg->link, &smi_info->xmit_msgs);
- spin_unlock_irqrestore(&smi_info->msg_lock, flags);
- spin_lock_irqsave(&smi_info->si_lock, flags);
- if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+ /*
+ * last_timeout_jiffies is updated here to avoid
+ * smi_timeout() handler passing very large time_diff
+ * value to smi_event_handler() that causes
+ * the send command to abort.
+ */
+ smi_info->last_timeout_jiffies = jiffies;
+
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
start_next_msg(smi_info);
+ smi_event_handler(smi_info, 0);
+ }
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
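
With deliver_recv_msg() and ipmi_smi_watchdog_pretimeout() no longer dropping si_lock, the separate msg_lock (removed from the struct above and from smi_info_alloc() below) is no longer needed: the transmit queues are protected by si_lock alone. That also lets sender() check atomically whether the state machine is idle and, only in that case, re-arm the timer, wake the worker thread, and start the new message immediately; a transaction already in flight no longer has its timer reset on every enqueue.
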
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
- unsigned long flags;
+ unsigned long flags = 0;
+ int run_to_completion = smi_info->run_to_completion;
/*
* Make sure there is some delay in the poll loop so we can
* drive time forward and timeout things.
*/
udelay(10);
- spin_lock_irqsave(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_lock_irqsave(&smi_info->si_lock, flags);
smi_event_handler(smi_info, 10);
- spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
{
struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info) {
+ if (info)
spin_lock_init(&info->si_lock);
- spin_lock_init(&info->msg_lock);
- }
return info;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 020a6aec2d8..7ed356e5203 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -520,6 +520,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -528,8 +529,8 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_smi_msg,
&panic_halt_heartbeat_recv_msg,
1);
- if (!rv)
- atomic_add(2, &panic_done_count);
+ if (rv)
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -553,16 +554,18 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
+ atomic_add(2, &panic_done_count);
rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
- if (!rv) {
- atomic_add(2, &panic_done_count);
- if (send_heartbeat_now)
- panic_halt_ipmi_heartbeat();
- } else
+ if (rv) {
+ atomic_sub(2, &panic_done_count);
printk(KERN_WARNING PFX
"Unable to extend the watchdog timeout.");
+ } else {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
}
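
Both watchdog panic paths now follow the same discipline as ipmi_panic_request_and_wait() above: panic_done_count is incremented before the request is issued and decremented again only if the request fails, so the completion handlers can never decrement a count that has not yet been added. The reboot-notifier hunk below additionally switches from panic_halt_ipmi_set_timeout() to the ordinary ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB), presumably because the reboot path runs in normal process context rather than in a panic.
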
@@ -1164,7 +1167,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
if (code == SYS_POWER_OFF || code == SYS_HALT) {
/* Disable the WDT if we are shutting down. */
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/* Set a long timer to let the reboot happens, but
reboot if it hangs, but only if the watchdog
@@ -1172,7 +1175,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
}
}
return NOTIFY_OK;