Merge branch 'akpm' (Andrew's patch-bomb)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31dfbdba423491afe58004178693767..1e638fff40ea2cb01d99d33745ae5f6526787237 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
-       spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
 static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
 {
-       /* Deliver the message to the upper layer with the lock
-          released. */
-
-       if (smi_info->run_to_completion) {
-               ipmi_smi_msg_received(smi_info->intf, msg);
-       } else {
-               spin_unlock(&(smi_info->si_lock));
-               ipmi_smi_msg_received(smi_info->intf, msg);
-               spin_lock(&(smi_info->si_lock));
-       }
+       /* Deliver the message to the upper layer. */
+       ipmi_smi_msg_received(smi_info->intf, msg);
 }
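
The hunk above removes the unlock/relock dance around ipmi_smi_msg_received(): with msg_lock gone, delivery now happens with si_lock held, so the upper layer has to be safe to call in that context. As a rough, hedged illustration of why the old pattern is awkward (a userspace sketch with pthread mutexes standing in for the spinlock and invented names, not driver code): dropping a lock around a callback means every piece of guarded state must be revalidated after the lock is retaken.

/* Illustrative only -- not driver code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;                     /* shared state guarded by 'lock' */

static void upper_layer(int item)
{
        printf("delivered %d\n", item);
}

/* Old style: drop the lock around the callback, then re-check state. */
static void deliver_dropping_lock(void)
{
        pthread_mutex_lock(&lock);
        while (pending > 0) {
                int item = pending--;

                pthread_mutex_unlock(&lock);
                upper_layer(item);      /* 'pending' may change here */
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
}

/* New style: keep the lock held across a callback that tolerates it. */
static void deliver_holding_lock(void)
{
        pthread_mutex_lock(&lock);
        while (pending > 0)
                upper_layer(pending--);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pending = 2;
        deliver_dropping_lock();
        pending = 2;
        deliver_holding_lock();
        return 0;
}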
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
        struct timeval t;
 #endif
 
-       /*
-        * No need to save flags, we aleady have interrupts off and we
-        * already hold the SMI lock.
-        */
-       if (!smi_info->run_to_completion)
-               spin_lock(&(smi_info->msg_lock));
-
        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
  out:
-       if (!smi_info->run_to_completion)
-               spin_unlock(&(smi_info->msg_lock));
-
        return rv;
 }
 
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
 
                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
-               spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
-               spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void                *send_info,
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
-       /*
-        * last_timeout_jiffies is updated here to avoid
-        * smi_timeout() handler passing very large time_diff
-        * value to smi_event_handler() that causes
-        * the send command to abort.
-        */
-       smi_info->last_timeout_jiffies = jiffies;
-
-       mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
-       if (smi_info->thread)
-               wake_up_process(smi_info->thread);
-
        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void                *send_info,
                return;
        }
 
-       spin_lock_irqsave(&smi_info->msg_lock, flags);
+       spin_lock_irqsave(&smi_info->si_lock, flags);
        if (priority > 0)
                list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
        else
                list_add_tail(&msg->link, &smi_info->xmit_msgs);
-       spin_unlock_irqrestore(&smi_info->msg_lock, flags);
 
-       spin_lock_irqsave(&smi_info->si_lock, flags);
-       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+               /*
+                * last_timeout_jiffies is updated here to avoid the
+                * smi_timeout() handler passing a very large time_diff
+                * value to smi_event_handler(), which would cause
+                * the send command to abort.
+                */
+               smi_info->last_timeout_jiffies = jiffies;
+
+               mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+               if (smi_info->thread)
+                       wake_up_process(smi_info->thread);
+
                start_next_msg(smi_info);
+               smi_event_handler(smi_info, 0);
+       }
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
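
After this hunk, sender() does everything under si_lock: it enqueues the message and, only when the interface is idle (SI_NORMAL with no current message), refreshes the timer, wakes the kthread and kicks the state machine. A minimal userspace sketch of that shape, with pthreads in place of the spinlock and invented names (struct smi_like, submit()); the worker side is omitted.

/* Illustrative only -- hypothetical names, pthreads instead of spinlocks. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct msg {
        struct msg *next;
        int payload;
};

struct smi_like {
        pthread_mutex_t lock;           /* plays the role of si_lock */
        pthread_cond_t kick;            /* stands in for waking the kthread */
        struct msg *head, *tail;        /* xmit queue */
        bool busy;                      /* "not SI_NORMAL, or curr_msg set" */
};

static void submit(struct smi_like *s, struct msg *m)
{
        pthread_mutex_lock(&s->lock);

        /* Enqueue under the same lock that guards the consumer's state. */
        m->next = NULL;
        if (s->tail)
                s->tail->next = m;
        else
                s->head = m;
        s->tail = m;

        /*
         * Kick the worker only when it is idle; if it is busy it will
         * pick this message up when it finishes the current one.
         */
        if (!s->busy) {
                s->busy = true;
                pthread_cond_signal(&s->kick);
        }

        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        static struct smi_like s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .kick = PTHREAD_COND_INITIALIZER,
        };
        struct msg m = { .payload = 42 };

        submit(&s, &m);
        printf("queued %d, busy=%d\n", s.head->payload, (int)s.busy);
        return 0;
}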
 
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
 static void poll(void *send_info)
 {
        struct smi_info *smi_info = send_info;
-       unsigned long flags;
+       unsigned long flags = 0;
+       int run_to_completion = smi_info->run_to_completion;
 
        /*
         * Make sure there is some delay in the poll loop so we can
         * drive time forward and timeout things.
         */
        udelay(10);
-       spin_lock_irqsave(&smi_info->si_lock, flags);
+       if (!run_to_completion)
+               spin_lock_irqsave(&smi_info->si_lock, flags);
        smi_event_handler(smi_info, 10);
-       spin_unlock_irqrestore(&smi_info->si_lock, flags);
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
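
The new poll() takes si_lock only when the driver is not in run-to-completion mode, snapshotting the flag once so the lock and unlock decisions always pair up, and pre-initializing flags because spin_lock_irqsave() may never run. A rough userspace analogy of that conditional-locking shape (hypothetical names, a pthread mutex instead of spin_lock_irqsave):

/* Illustrative only -- not driver code. */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool run_to_completion;         /* e.g. set on a panic path */

static void event_handler(unsigned int usec)
{
        (void)usec;                     /* stand-in for the state-machine step */
}

static void poll_once(void)
{
        /* Snapshot the mode once so lock and unlock always match. */
        bool rtc = run_to_completion;

        usleep(10);                     /* keep time moving, as poll() does */
        if (!rtc)
                pthread_mutex_lock(&dev_lock);
        event_handler(10);
        if (!rtc)
                pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
        poll_once();                    /* normal path: locked */
        run_to_completion = true;
        poll_once();                    /* run-to-completion: lock skipped */
        return 0;
}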
 
 static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
 {
        struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 
-       if (info) {
+       if (info)
                spin_lock_init(&info->si_lock);
-               spin_lock_init(&info->msg_lock);
-       }
        return info;
 }