Commit fbd568a3 authored by Paul E. McKenney, committed by Linus Torvalds

[PATCH] Change synchronize_kernel to _rcu and _sched

This patch changes calls to synchronize_kernel(), deprecated in the earlier
"Deprecate synchronize_kernel, GPL replacement" patch to instead call the new
synchronize_rcu() and synchronize_sched() APIs.
Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9b06e818
...@@ -36,7 +36,7 @@ static void timer_stop(void) ...@@ -36,7 +36,7 @@ static void timer_stop(void)
{ {
enable_timer_nmi_watchdog(); enable_timer_nmi_watchdog();
unset_nmi_callback(); unset_nmi_callback();
synchronize_kernel(); synchronize_sched(); /* Allow already-started NMIs to complete. */
} }
......
...@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType ) ...@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
/* We now sleep until all other CPUs have scheduled. This ensures that /* We now sleep until all other CPUs have scheduled. This ensures that
* the deletion is seen by all other CPUs, and that the deleted handler * the deletion is seen by all other CPUs, and that the deleted handler
* isn't still running on another CPU when we return. */ * isn't still running on another CPU when we return. */
synchronize_kernel(); synchronize_rcu();
} }
} }
return rc; return rc;
......
...@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr) ...@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
/* Fall back to the default idle loop */ /* Fall back to the default idle loop */
pm_idle = pm_idle_save; pm_idle = pm_idle_save;
synchronize_kernel(); synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
pr->flags.power = 0; pr->flags.power = 0;
result = acpi_processor_get_power_info(pr); result = acpi_processor_get_power_info(pr);
......
...@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi) ...@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
/* Wait until we know that we are out of any interrupt /* Wait until we know that we are out of any interrupt
handlers might have been running before we freed the handlers might have been running before we freed the
interrupt. */ interrupt. */
synchronize_kernel(); synchronize_sched();
if (new_smi->si_sm) { if (new_smi->si_sm) {
if (new_smi->handlers) if (new_smi->handlers)
...@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) ...@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
/* Wait until we know that we are out of any interrupt /* Wait until we know that we are out of any interrupt
handlers might have been running before we freed the handlers might have been running before we freed the
interrupt. */ interrupt. */
synchronize_kernel(); synchronize_sched();
/* Wait for the timer to stop. This avoids problems with race /* Wait for the timer to stop. This avoids problems with race
conditions removing the timer here. */ conditions removing the timer here. */
......
...@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio) ...@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio)
atkbd_disable(atkbd); atkbd_disable(atkbd);
/* make sure we don't have a command in flight */ /* make sure we don't have a command in flight */
synchronize_kernel(); synchronize_sched(); /* Allow atkbd_interrupt()s to complete. */
flush_scheduled_work(); flush_scheduled_work();
device_remove_file(&serio->dev, &atkbd_attr_extra); device_remove_file(&serio->dev, &atkbd_attr_extra);
......
...@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) ...@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
goto abort; goto abort;
} }
p->rdev = NULL; p->rdev = NULL;
synchronize_kernel(); synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) { if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */ /* lost the race, try later */
err = -EBUSY; err = -EBUSY;
......
...@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) ...@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
goto abort; goto abort;
} }
p->rdev = NULL; p->rdev = NULL;
synchronize_kernel(); synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) { if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */ /* lost the race, try later */
err = -EBUSY; err = -EBUSY;
......
...@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) ...@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
goto abort; goto abort;
} }
p->rdev = NULL; p->rdev = NULL;
synchronize_kernel(); synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) { if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */ /* lost the race, try later */
err = -EBUSY; err = -EBUSY;
......
...@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number) ...@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
goto abort; goto abort;
} }
p->rdev = NULL; p->rdev = NULL;
synchronize_kernel(); synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) { if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */ /* lost the race, try later */
err = -EBUSY; err = -EBUSY;
......
...@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number) ...@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
goto abort; goto abort;
} }
p->rdev = NULL; p->rdev = NULL;
synchronize_kernel(); synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) { if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */ /* lost the race, try later */
err = -EBUSY; err = -EBUSY;
......
...@@ -2385,7 +2385,7 @@ core_down: ...@@ -2385,7 +2385,7 @@ core_down:
} }
/* Give a racing hard_start_xmit a few cycles to complete. */ /* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_kernel(); synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
/* /*
* And now for the 50k$ question: are IRQ disabled or not ? * And now for the 50k$ question: are IRQ disabled or not ?
......
...@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler) ...@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler)
else else
ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0); ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
if (!ret) if (!ret)
synchronize_kernel(); synchronize_sched(); /* Allow interrupts to complete. */
sprintf (dbf_txt, "ret:%d", ret); sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (4, dbf_txt); CIO_TRACE_EVENT (4, dbf_txt);
...@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler) ...@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
ret = -EINVAL; ret = -EINVAL;
else { else {
adapter_handler = NULL; adapter_handler = NULL;
synchronize_kernel(); synchronize_sched(); /* Allow interrupts to complete. */
ret = 0; ret = 0;
} }
sprintf (dbf_txt, "ret:%d", ret); sprintf (dbf_txt, "ret:%d", ret);
......
...@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod, ...@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
/* Init routine failed: abort. Try to protect us from /* Init routine failed: abort. Try to protect us from
buggy refcounters. */ buggy refcounters. */
mod->state = MODULE_STATE_GOING; mod->state = MODULE_STATE_GOING;
synchronize_kernel(); synchronize_sched();
if (mod->unsafe) if (mod->unsafe)
printk(KERN_ERR "%s: module is now stuck!\n", printk(KERN_ERR "%s: module is now stuck!\n",
mod->name); mod->name);
......
...@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *)) ...@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
WARN_ON(hook != timer_hook); WARN_ON(hook != timer_hook);
timer_hook = NULL; timer_hook = NULL;
/* make sure all CPUs see the NULL hook */ /* make sure all CPUs see the NULL hook */
synchronize_kernel(); synchronize_sched(); /* Allow ongoing interrupts to complete. */
} }
EXPORT_SYMBOL_GPL(register_timer_hook); EXPORT_SYMBOL_GPL(register_timer_hook);
......
...@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep) ...@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
} }
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
synchronize_kernel(); synchronize_rcu();
/* no cpu_online check required here since we clear the percpu /* no cpu_online check required here since we clear the percpu
* array on cpu offline and set this to NULL. * array on cpu offline and set this to NULL.
......
...@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev) ...@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev)
void synchronize_net(void) void synchronize_net(void)
{ {
might_sleep(); might_sleep();
synchronize_kernel(); synchronize_rcu();
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment