author     Tejun Heo <tj@kernel.org>    2011-05-02 17:24:48 +0200
committer  Tejun Heo <tj@kernel.org>    2011-05-02 17:24:48 +0200
commit     2706a0bf7b02693ed88752df877f10c2206292ff (patch)
tree       0d59965472ca83443defb291e909a585e553b6d6 /arch/x86/mm
parent     c6f58878204b0414e00b43f9bcebf754284f95b4 (diff)
x86, NUMA: Enable CONFIG_AMD_NUMA on 32bit too
Now that NUMA init path is unified, amdtopology can be enabled on 32bit.
Make amdtopology.c safe on 32bit by explicitly using u64 and drop X86_64
dependency from Kconfig. Inclusion of bootmem.h is added for max_pfn
declaration.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
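Roughly why unsigned long is not enough here: on 32-bit x86 a long is 32 bits, but with PAE a machine can have physical memory above 4 GB, so PFN_PHYS(max_pfn) can exceed what an unsigned long holds and the value gets truncated. The following is only an illustrative user-space sketch (standard C, with a simplified stand-in for the kernel's PFN_PHYS() and a hypothetical max_pfn for a 6 GB box), not part of the patch:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's PFN_PHYS(); the real macro casts
 * through phys_addr_t, which is 64-bit when PAE is enabled. */
#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)

int main(void)
{
	unsigned long max_pfn = 0x180000;	/* hypothetical: 6 GB of RAM in 4 KB pages */

	uint64_t end = PFN_PHYS(max_pfn);		/* 0x180000000, kept intact */
	unsigned long end_long = (unsigned long)end;	/* wraps to 0x80000000 where long is 32-bit */

	printf("u64 end           = %#" PRIx64 "\n", end);
	printf("unsigned long end = %#lx\n", end_long);
	return 0;
}

On a 64-bit build both lines print the same value; on a 32-bit build the second one loses the top bits, which is the class of bug the switch to u64 avoids.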
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/amdtopology.c   21
1 file changed, 11 insertions(+), 10 deletions(-)
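The hunks below also switch the printk format specifiers from %lx to %Lx to match the new u64 variables, since passing a 64-bit value to %lx is wrong on i386, where long is 32 bits. A small stand-alone sketch of the same idea, using printf and PRIx64 as a user-space stand-in for printk's %Lx/%llx and a hypothetical limit value:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t limit = 0x17fffffffULL;	/* hypothetical node limit above 4 GB */

	/* On i386, unsigned long is 4 bytes, so %lx cannot consume a 64-bit
	 * argument; PRIx64 expands to the correct conversion for the target,
	 * much like %Lx/%llx always means "64-bit" in kernel printk. */
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("limit = %#" PRIx64 "\n", limit);
	return 0;
}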
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 0919c26820d4..5247d01329ca 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <linux/pci_ids.h>
@@ -69,10 +70,10 @@ static __init void early_get_boot_cpu_id(void)
 
 int __init amd_numa_init(void)
 {
-	unsigned long start = PFN_PHYS(0);
-	unsigned long end = PFN_PHYS(max_pfn);
+	u64 start = PFN_PHYS(0);
+	u64 end = PFN_PHYS(max_pfn);
 	unsigned numnodes;
-	unsigned long prevbase;
+	u64 prevbase;
 	int i, j, nb;
 	u32 nodeid, reg;
 	unsigned int bits, cores, apicid_base;
@@ -95,7 +96,7 @@ int __init amd_numa_init(void)
 
 	prevbase = 0;
 	for (i = 0; i < 8; i++) {
-		unsigned long base, limit;
+		u64 base, limit;
 
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
@@ -107,18 +108,18 @@
 			continue;
 		}
 		if (nodeid >= numnodes) {
-			pr_info("Ignoring excess node %d (%lx:%lx)\n", nodeid,
+			pr_info("Ignoring excess node %d (%Lx:%Lx)\n", nodeid,
 				base, limit);
 			continue;
 		}
 
 		if (!limit) {
-			pr_info("Skipping node entry %d (base %lx)\n",
+			pr_info("Skipping node entry %d (base %Lx)\n",
 				i, base);
 			continue;
 		}
 		if ((base >> 8) & 3 || (limit >> 8) & 3) {
-			pr_err("Node %d using interleaving mode %lx/%lx\n",
+			pr_err("Node %d using interleaving mode %Lx/%Lx\n",
 			       nodeid, (base >> 8) & 3, (limit >> 8) & 3);
 			return -EINVAL;
 		}
@@ -150,19 +151,19 @@
 			continue;
 		}
 		if (limit < base) {
-			pr_err("Node %d bogus settings %lx-%lx.\n",
+			pr_err("Node %d bogus settings %Lx-%Lx.\n",
 			       nodeid, base, limit);
 			continue;
 		}
 
 		/* Could sort here, but pun for now. Should not happen anyroads. */
 		if (prevbase > base) {
-			pr_err("Node map not sorted %lx,%lx\n",
+			pr_err("Node map not sorted %Lx,%Lx\n",
 				prevbase, base);
 			return -EINVAL;
 		}
 
-		pr_info("Node %d MemBase %016lx Limit %016lx\n",
+		pr_info("Node %d MemBase %016Lx Limit %016Lx\n",
 			nodeid, base, limit);
 
 		prevbase = base;