author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-17 17:00:20 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-17 17:00:20 -0800
commit    55db493b65c7b6bb5d7bd3dd3c8a2fe13f5dc09c
tree      7f9203f43e7c81687c9aaa0213266bc7b2e89e35
parent    efc8e7f4c83dc85acbf5f54a8b1b24ae75b20aaa
parent    a4636818f8e0991f32d9528f39cf4f3d6a7d30a3
Merge branch 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  cpumask: rename tsk_cpumask to tsk_cpus_allowed
  cpumask: don't recommend set_cpus_allowed hack in Documentation/cpu-hotplug.txt
  cpumask: avoid dereferencing struct cpumask
  cpumask: convert drivers/idle/i7300_idle.c to cpumask_var_t
  cpumask: use modern cpumask style in drivers/scsi/fcoe/fcoe.c
  cpumask: avoid deprecated function in mm/slab.c
  cpumask: use cpu_online in kernel/perf_event.c
-rw-r--r--  Documentation/cpu-hotplug.txt             | 49
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c |  2
-rw-r--r--  drivers/idle/i7300_idle.c                 | 15
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                  |  2
-rw-r--r--  include/linux/sched.h                     |  2
-rw-r--r--  kernel/perf_event.c                       |  2
-rw-r--r--  kernel/time/timer_list.c                  |  4
-rw-r--r--  mm/slab.c                                 |  2
8 files changed, 33 insertions(+), 45 deletions(-)
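
Most of the series is a mechanical swap of deprecated cpumask operations for their pointer-based replacements. A minimal sketch of the mapping, using an illustrative function and mask rather than code from any of the patched files:

#include <linux/cpumask.h>

/* illustrative only; "mask" stands in for whichever cpumask a driver owns */
static void modern_cpumask_style(struct cpumask *mask, int cpu)
{
	cpumask_set_cpu(cpu, mask);		/* was: cpu_set(cpu, *mask) */
	cpumask_clear_cpu(cpu, mask);		/* was: cpu_clear(cpu, *mask) */

	if (cpumask_empty(mask))		/* was: cpus_empty(*mask) */
		return;

	if (cpumask_weight(mask) != num_online_cpus())	/* was: cpus_weight(*mask) */
		return;

	cpu = cpumask_first(cpu_online_mask);	/* was: first_cpu(cpu_online_map) */

	if (!cpu_online(cpu))			/* was: cpu_isset(cpu, cpu_online_map) */
		return;
}

Each hunk below is an instance of one of these substitutions, plus the cpumask_var_t conversion in i7300_idle and the tsk_cpus_allowed() rename in sched.h.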
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 4d4a644b505..a99d7031cdf 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -315,41 +315,26 @@ A: The following are what is required for CPU hotplug infrastructure to work
Q: I need to ensure that a particular cpu is not removed when there is some
work specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways. If your code can be run in interrupt context, use
+ smp_call_function_single(), otherwise use work_on_cpu(). Note that
+ work_on_cpu() is slow, and can fail due to out of memory:
int my_func_on_cpu(int cpu)
{
- cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
- int curr_cpu, err = 0;
-
- saved_mask = current->cpus_allowed;
- cpu_set(cpu, new_mask);
- err = set_cpus_allowed(current, new_mask);
-
- if (err)
- return err;
-
- /*
- * If we got scheduled out just after the return from
- * set_cpus_allowed() before running the work, this ensures
- * we stay locked.
- */
- curr_cpu = get_cpu();
-
- if (curr_cpu != cpu) {
- err = -EAGAIN;
- goto ret;
- } else {
- /*
- * Do work : But cant sleep, since get_cpu() disables preempt
- */
- }
- ret:
- put_cpu();
- set_cpus_allowed(current, saved_mask);
- return err;
- }
-
+ int err;
+ get_online_cpus();
+ if (!cpu_online(cpu))
+ err = -EINVAL;
+ else
+#if NEEDS_BLOCKING
+ err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+ smp_call_function_single(cpu, __my_func_on_cpu, &err,
+ true);
+#endif
+ put_online_cpus();
+ return err;
+ }
Q: How do we determine how many CPUs are available for hotplug.
A: There is no clear spec defined way from ACPI that can give us that
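
The rewritten answer drops the old set_cpus_allowed() affinity trick in favour of two existing helpers: smp_call_function_single() runs its callback in IPI context and must not sleep, while work_on_cpu() runs it in a workqueue thread on the target CPU and may block. A hedged sketch of what the two flavours of __my_func_on_cpu could look like (the bodies and names are illustrative, not part of the patch):

#include <linux/smp.h>
#include <linux/workqueue.h>

/* For smp_call_function_single(): runs in IPI context, must not sleep. */
static void __my_func_on_cpu_atomic(void *info)
{
	int *err = info;

	/* touch per-CPU state, poke a register, etc.; no blocking calls */
	*err = 0;
}

/* For work_on_cpu(): runs in process context on the target CPU, may sleep. */
static long __my_func_on_cpu_blocking(void *arg)
{
	/* GFP_KERNEL allocations, mutexes and the like are fine here */
	return 0;
}

Invocation then follows the documentation snippet above: smp_call_function_single(cpu, __my_func_on_cpu_atomic, &err, true) in the non-blocking case, err = work_on_cpu(cpu, __my_func_on_cpu_blocking, NULL) in the blocking one.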
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index a9df9441a9a..f125e5c551c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1136,7 +1136,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
return -ENOMEM;
- cpumask_copy(oldmask, tsk_cpumask(current));
+ cpumask_copy(oldmask, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
if (smp_processor_id() != pol->cpu) {
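
The powernow-k8 hunk is a pure rename (tsk_cpumask() becomes tsk_cpus_allowed()); the surrounding driver code is the usual save/pin/restore affinity pattern. A rough sketch of that pattern around the renamed accessor, reconstructed rather than copied from the driver:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/smp.h>

static int run_pinned_on_cpu(int cpu)
{
	cpumask_var_t oldmask;
	int ret = 0;

	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(oldmask, tsk_cpus_allowed(current));	/* save affinity */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));		/* pin to cpu */

	if (smp_processor_id() != cpu)
		ret = -EIO;	/* migration did not happen, as in the driver's check */
	/* else: do the per-CPU work here */

	set_cpus_allowed_ptr(current, oldmask);			/* restore */
	free_cpumask_var(oldmask);
	return ret;
}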
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a042a4f..dd253002cd5 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
- cpu_set(smp_processor_id(), idle_cpumask);
+ cpumask_set_cpu(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) != num_online_cpus())
+ if (cpumask_weight(idle_cpumask) != num_online_cpus())
goto end;
now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
i7300_idle_ioat_start();
} else if (val == IDLE_END) {
- cpu_clear(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+ cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+ if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
/* First CPU coming out of idle */
u64 idle_duration_us;
@@ -553,7 +553,6 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
spin_lock_init(&i7300_idle_lock);
- cpus_clear(idle_cpumask);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
if (i7300_idle_ioat_init())
return -ENODEV;
+ if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
if (debugfs_dir) {
int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
static void __exit i7300_idle_exit(void)
{
idle_notifier_unregister(&i7300_idle_nb);
+ free_cpumask_var(idle_cpumask);
if (debugfs_dir) {
int i = 0;
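
The i7300_idle conversion is the standard cpumask_var_t lifecycle: the static cpumask_t and its cpus_clear() at init time are replaced by an allocation in the init path and a matching free in the exit path. A condensed sketch of that lifecycle with illustrative names:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t example_mask;	/* was: static cpumask_t example_mask; */

static int __init example_init(void)
{
	/* zalloc_cpumask_var() also zeroes the mask, so cpus_clear() goes away */
	if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* with CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated; free it */
	free_cpumask_var(example_mask);
}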
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index e3896fcb06e..10be9f36a4c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1260,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
"CPU.\n");
spin_unlock_bh(&fps->fcoe_rx_list.lock);
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_first(cpu_online_mask);
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
if (!fps->thread) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 211ed32befb..e89857812be 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1553,7 +1553,7 @@ struct task_struct {
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8ab86988bd2..97d1a3dd7a5 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1614,7 +1614,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
- if (!cpu_isset(cpu, cpu_online_map))
+ if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = &per_cpu(perf_cpu_context, cpu);
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 28265636b6c..bdfb8dd1050 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -237,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
print_tickdevice(m, tick_get_broadcast_device(), -1);
SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
- tick_get_broadcast_mask()->bits[0]);
+ cpumask_bits(tick_get_broadcast_mask())[0]);
#ifdef CONFIG_TICK_ONESHOT
SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
- tick_get_broadcast_oneshot_mask()->bits[0]);
+ cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
#endif
SEQ_printf(m, "\n");
#endif
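
The timer_list change implements the "avoid dereferencing struct cpumask" item: the mask is treated as opaque, so the open-coded ->bits[0] access gives way to the cpumask_bits() accessor. A tiny illustrative helper (the function is hypothetical):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* print the low word of a mask without reaching into the struct directly */
static void show_low_word(const struct cpumask *mask)
{
	pr_info("mask low word: %08lx\n", cpumask_bits(mask)[0]);
}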
diff --git a/mm/slab.c b/mm/slab.c
index e17cc2c337b..7d41f15b48d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1132,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
- if (!cpus_empty(*mask)) {
+ if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock);
goto free_array_cache;
}