Commit bf52fa4a authored by Doug Thompson's avatar Doug Thompson Committed by Linus Torvalds

drivers/edac: fix workq reset deadlock

Fix a mutex locking deadlock on the device controller linked list.  The code
acquired a lock and then called a function that could acquire the same lock.
Moved the cancel-workq call outside the locked region.

Added some short circuit logic in the workq code

Added comments of description

Code tidying
Signed-off-by: Doug Thompson <dougthompson@xmission.com>
Cc: Greg KH <greg@kroah.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fb3fb206
...@@ -32,7 +32,9 @@ ...@@ -32,7 +32,9 @@
#include "edac_core.h" #include "edac_core.h"
#include "edac_module.h" #include "edac_module.h"
/* lock to memory controller's control array 'edac_device_list' */ /* lock for the list: 'edac_device_list', manipulation of this list
* is protected by the 'device_ctls_mutex' lock
*/
static DEFINE_MUTEX(device_ctls_mutex); static DEFINE_MUTEX(device_ctls_mutex);
static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
...@@ -386,6 +388,14 @@ EXPORT_SYMBOL_GPL(edac_device_find); ...@@ -386,6 +388,14 @@ EXPORT_SYMBOL_GPL(edac_device_find);
/* /*
* edac_device_workq_function * edac_device_workq_function
* performs the operation scheduled by a workq request * performs the operation scheduled by a workq request
*
* this workq is embedded within an edac_device_ctl_info
* structure, that needs to be polled for possible error events.
*
* This operation is to acquire the list mutex lock
 * (thus preventing insertion or deletion)
* and then call the device's poll function IFF this device is
* running polled and there is a poll function defined.
*/ */
static void edac_device_workq_function(struct work_struct *work_req) static void edac_device_workq_function(struct work_struct *work_req)
{ {
...@@ -403,8 +413,17 @@ static void edac_device_workq_function(struct work_struct *work_req) ...@@ -403,8 +413,17 @@ static void edac_device_workq_function(struct work_struct *work_req)
mutex_unlock(&device_ctls_mutex); mutex_unlock(&device_ctls_mutex);
/* Reschedule */ /* Reschedule the workq for the next time period to start again
queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay); * if the number of msec is for 1 sec, then adjust to the next
 * whole one second to save timers firing all over the period
* between integral seconds
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
round_jiffies(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
} }
/* /*
...@@ -417,11 +436,26 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, ...@@ -417,11 +436,26 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
{ {
debugf0("%s()\n", __func__); debugf0("%s()\n", __func__);
/* take the arg 'msec' and set it into the control structure
* to used in the time period calculation
* then calc the number of jiffies that represents
*/
edac_dev->poll_msec = msec; edac_dev->poll_msec = msec;
edac_dev->delay = msecs_to_jiffies(msec); /* Calc delay jiffies */ edac_dev->delay = msecs_to_jiffies(msec);
INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function); INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
/* optimize here for the 1 second case, which will be normal value, to
* fire ON the 1 second time event. This helps reduce all sorts of
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
round_jiffies(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
} }
/* /*
...@@ -441,16 +475,20 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) ...@@ -441,16 +475,20 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
/* /*
* edac_device_reset_delay_period * edac_device_reset_delay_period
*
* need to stop any outstanding workq queued up at this time
* because we will be resetting the sleep time.
* Then restart the workq on the new delay
*/ */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
unsigned long value) unsigned long value)
{ {
mutex_lock(&device_ctls_mutex); /* cancel the current workq request, without the mutex lock */
/* cancel the current workq request */
edac_device_workq_teardown(edac_dev); edac_device_workq_teardown(edac_dev);
/* acquire the mutex before doing the workq setup */
mutex_lock(&device_ctls_mutex);
/* restart the workq request, with new delay value */ /* restart the workq request, with new delay value */
edac_device_workq_setup(edac_dev, value); edac_device_workq_setup(edac_dev, value);
......
...@@ -258,6 +258,12 @@ static void edac_mc_workq_function(struct work_struct *work_req) ...@@ -258,6 +258,12 @@ static void edac_mc_workq_function(struct work_struct *work_req)
mutex_lock(&mem_ctls_mutex); mutex_lock(&mem_ctls_mutex);
	/* if this control struct has moved to offline state, we are done */
if (mci->op_state == OP_OFFLINE) {
mutex_unlock(&mem_ctls_mutex);
return;
}
/* Only poll controllers that are running polled and have a check */ /* Only poll controllers that are running polled and have a check */
if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
mci->edac_check(mci); mci->edac_check(mci);
...@@ -279,11 +285,19 @@ static void edac_mc_workq_function(struct work_struct *work_req) ...@@ -279,11 +285,19 @@ static void edac_mc_workq_function(struct work_struct *work_req)
* edac_mc_workq_setup * edac_mc_workq_setup
* initialize a workq item for this mci * initialize a workq item for this mci
* passing in the new delay period in msec * passing in the new delay period in msec
*
* locking model:
*
* called with the mem_ctls_mutex held
*/ */
void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{ {
debugf0("%s()\n", __func__); debugf0("%s()\n", __func__);
/* if this instance is not in the POLL state, then simply return */
if (mci->op_state != OP_RUNNING_POLL)
return;
INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
} }
...@@ -291,29 +305,39 @@ void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) ...@@ -291,29 +305,39 @@ void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
/* /*
* edac_mc_workq_teardown * edac_mc_workq_teardown
* stop the workq processing on this mci * stop the workq processing on this mci
*
* locking model:
*
* called WITHOUT lock held
*/ */
void edac_mc_workq_teardown(struct mem_ctl_info *mci) static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{ {
int status; int status;
/* if not running POLL, leave now */
if (mci->op_state == OP_RUNNING_POLL) {
status = cancel_delayed_work(&mci->work); status = cancel_delayed_work(&mci->work);
if (status == 0) { if (status == 0) {
debugf0("%s() not canceled, flush the queue\n",
__func__);
/* workq instance might be running, wait for it */ /* workq instance might be running, wait for it */
flush_workqueue(edac_workqueue); flush_workqueue(edac_workqueue);
} }
}
} }
/* /*
* edac_reset_delay_period * edac_reset_delay_period
*/ */
static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
{ {
mutex_lock(&mem_ctls_mutex);
/* cancel the current workq request */ /* cancel the current workq request */
edac_mc_workq_teardown(mci); edac_mc_workq_teardown(mci);
/* lock the list of devices for the new setup */
mutex_lock(&mem_ctls_mutex);
/* restart the workq request, with new delay value */ /* restart the workq request, with new delay value */
edac_mc_workq_setup(mci, value); edac_mc_workq_setup(mci, value);
...@@ -323,6 +347,10 @@ void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value) ...@@ -323,6 +347,10 @@ void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
/* Return 0 on success, 1 on failure. /* Return 0 on success, 1 on failure.
* Before calling this function, caller must * Before calling this function, caller must
* assign a unique value to mci->mc_idx. * assign a unique value to mci->mc_idx.
*
* locking model:
*
* called with the mem_ctls_mutex lock held
*/ */
static int add_mc_to_global_list(struct mem_ctl_info *mci) static int add_mc_to_global_list(struct mem_ctl_info *mci)
{ {
...@@ -331,7 +359,8 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) ...@@ -331,7 +359,8 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
insert_before = &mc_devices; insert_before = &mc_devices;
if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL)) p = find_mci_by_dev(mci->dev);
if (unlikely(p != NULL))
goto fail0; goto fail0;
list_for_each(item, &mc_devices) { list_for_each(item, &mc_devices) {
...@@ -467,8 +496,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) ...@@ -467,8 +496,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
} }
/* Report action taken */ /* Report action taken */
edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n", edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
mci->mod_name, mci->ctl_name, dev_name(mci)); " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci));
mutex_unlock(&mem_ctls_mutex); mutex_unlock(&mem_ctls_mutex);
return 0; return 0;
...@@ -493,10 +522,13 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) ...@@ -493,10 +522,13 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{ {
struct mem_ctl_info *mci; struct mem_ctl_info *mci;
debugf0("MC: %s()\n", __func__); debugf0("%s()\n", __func__);
mutex_lock(&mem_ctls_mutex); mutex_lock(&mem_ctls_mutex);
if ((mci = find_mci_by_dev(dev)) == NULL) { /* find the requested mci struct in the global list */
mci = find_mci_by_dev(dev);
if (mci == NULL) {
mutex_unlock(&mem_ctls_mutex); mutex_unlock(&mem_ctls_mutex);
return NULL; return NULL;
} }
...@@ -504,15 +536,17 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) ...@@ -504,15 +536,17 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
/* marking MCI offline */ /* marking MCI offline */
mci->op_state = OP_OFFLINE; mci->op_state = OP_OFFLINE;
/* flush workq processes */
edac_mc_workq_teardown(mci);
edac_remove_sysfs_mci_device(mci);
del_mc_from_global_list(mci); del_mc_from_global_list(mci);
mutex_unlock(&mem_ctls_mutex); mutex_unlock(&mem_ctls_mutex);
/* flush workq processes and remove sysfs */
edac_mc_workq_teardown(mci);
edac_remove_sysfs_mci_device(mci);
edac_printk(KERN_INFO, EDAC_MC, edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n", mci->mc_idx, "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
mci->mod_name, mci->ctl_name, dev_name(mci)); mci->mod_name, mci->ctl_name, dev_name(mci));
return mci; return mci;
} }
EXPORT_SYMBOL_GPL(edac_mc_del_mc); EXPORT_SYMBOL_GPL(edac_mc_del_mc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment