cpumask: alloc zeroed cpumask for static cpumask_var_ts [Linux 2.6.30]

This Linux kernel change "cpumask: alloc zeroed cpumask for static cpumask_var_ts" is included in the Linux 2.6.30 release. This change is authored by Yinghai Lu <yinghai [at] kernel.org> on Sat Jun 6 14:51:36 2009 -0700. The commit for this change in Linux stable tree is eaa9584 (patch).

cpumask: alloc zeroed cpumask for static cpumask_var_ts

These are defined as static cpumask_var_t so if MAXSMP is not used,
they are cleared already.  Avoid surprises when MAXSMP is enabled.

Signed-off-by: Yinghai Lu <[email protected]>
Signed-off-by: Rusty Russell <[email protected]>

There are 22 lines of Linux source code added/deleted in this change. Code changes to Linux kernel are as follows.

 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       | 2 +-
 arch/x86/kernel/cpu/cpufreq/powernow-k7.c        | 2 +-
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c        | 2 +-
 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 2 +-
 arch/x86/kernel/cpu/mcheck/mce_64.c              | 2 +-
 arch/x86/kernel/tlb_uv.c                         | 2 +-
 drivers/acpi/processor_core.c                    | 2 +-
 drivers/cpufreq/cpufreq.c                        | 2 +-
 kernel/sched_cpupri.c                            | 2 +-
 kernel/sched_rt.c                                | 2 +-
 kernel/smp.c                                     | 2 +-
 11 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 54b6de2..752e8c6 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void)
        return -ENOMEM;
    }
    for_each_possible_cpu(i) {
-       if (!alloc_cpumask_var_node(
+       if (!zalloc_cpumask_var_node(
            &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
            GFP_KERNEL, cpu_to_node(i))) {

diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index a8363e5..d47c775 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -322,7 +322,7 @@ static int powernow_acpi_init(void)
        goto err0;
    }

-   if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+   if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
                                GFP_KERNEL)) {
        retval = -ENOMEM;
        goto err05;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35dc8fb..cf52215 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -887,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
    /* notify BIOS that we exist */
    acpi_processor_notify_smm(THIS_MODULE);

-   if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+   if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
        printk(KERN_ERR PFX
                "unable to alloc powernow_k8_data cpumask\n");
        ret_val = -ENOMEM;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index c9f1fdc..55c831e 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy,

    if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
        return -ENOMEM;
-   if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+   if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
        free_cpumask_var(saved_mask);
        return -ENOMEM;
    }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 6fb0b35..09dd1d4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -1163,7 +1163,7 @@ static __init int mce_init_device(void)
    if (!mce_available(&boot_cpu_data))
        return -EIO;

-   alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+   zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);

    err = mce_init_banks();
    if (err)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index ed0c337..8c7b03b 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -832,7 +832,7 @@ static int __init uv_bau_init(void)
        return 0;

    for_each_possible_cpu(cur_cpu)
-       alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+       zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
                       GFP_KERNEL, cpu_to_node(cur_cpu));

    uv_bau_retry_limit = 1;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 45ad328..23f0fb8 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
    if (!pr)
        return -ENOMEM;

-   if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+   if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
        kfree(pr);
        return -ENOMEM;
    }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47d2ad0..6e2ec0b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        ret = -ENOMEM;
        goto nomem_out;
    }
-   if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+   if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
        free_cpumask_var(policy->cpus);
        kfree(policy);
        ret = -ENOMEM;
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index cdd3c89..344712a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
        vec->count = 0;
        if (bootmem)
            alloc_bootmem_cpumask_var(&vec->mask);
-       else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+       else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
            goto cleanup;
    }

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f2c66f8..9bf0d2a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void)
    unsigned int i;

    for_each_possible_cpu(i)
-       alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+       zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                    GFP_KERNEL, cpu_to_node(i));
 }
 #endif /* CONFIG_SMP */
diff --git a/kernel/smp.c b/kernel/smp.c
index 858baac..ad63d85 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@ static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
-       if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+       if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                cpu_to_node(cpu)))
            return NOTIFY_BAD;
        break;

Leave a Reply

Your email address will not be published. Required fields are marked *