author		Ingo Molnar <mingo@elte.hu>	2009-03-26 21:39:17 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-03-27 17:28:43 +0100
commit		6e15cf04860074ad032e88c306bea656bbdd0f22 (patch)
tree		c346383bb7563e8d66b2f4a502f875b259c34870 /include/linux/percpu.h
parent		be0ea69674ed95e1e98cb3687a241badc756d228 (diff)
parent		60db56422043aaa455ac7f858ce23c273220f9d9 (diff)
Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
	arch/parisc/kernel/irq.c
	arch/x86/include/asm/fixmap_64.h
	arch/x86/include/asm/setup.h
	kernel/irq/handle.c

Semantic merge:
	arch/x86/include/asm/fixmap.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	| 159
1 file changed, 107 insertions(+), 52 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873..ee5615d6521 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,53 +5,66 @@
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
+#include <linux/pfn.h>
#include <asm/percpu.h>
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
+#define PER_CPU_FIRST_SECTION ".first"
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(SHARED_ALIGNED_SECTION))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
- ____cacheline_aligned_in_smp
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.page_aligned"))) \
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
#define DEFINE_PER_CPU(type, name) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+ DEFINE_PER_CPU_SECTION(type, name, "")
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
+#define PERCPU_MODULE_RESERVE (8 << 10)
#else
-#define PERCPU_MODULE_RESERVE 0
+#define PERCPU_MODULE_RESERVE 0
#endif
+#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM \
- (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+#endif
/*
* Must be an lvalue. Since @var must be a simple identifier,
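
Editor's note: the first hunk above folds the three separate SMP definitions (and their UP fallbacks) into one DEFINE_PER_CPU_SECTION helper that pastes a suffix onto PER_CPU_BASE_SECTION. A minimal usage sketch of the resulting macros follows; the variable names and the demo_tick() function are hypothetical, invented for illustration and not part of this patch:

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* On SMP this lands in .data.percpu via DEFINE_PER_CPU_SECTION;
	 * on UP it falls back to plain .data. */
	static DEFINE_PER_CPU(unsigned long, demo_count);

	/* Built-in code gets .data.percpu.shared_aligned plus
	 * ____cacheline_aligned_in_smp, avoiding false sharing with
	 * neighbouring per-cpu data; for modules the suffix is "",
	 * so the variable stays in the base section. */
	static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, demo_hits);

	static void demo_tick(void)
	{
		int cpu = get_cpu();	/* pin this CPU (disables preemption) */

		per_cpu(demo_count, cpu)++;
		per_cpu(demo_hits, cpu)++;
		put_cpu();
	}
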
@@ -65,52 +78,94 @@
#ifdef CONFIG_SMP
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size; also the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32.  A more
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size,
+ void *base_addr,
+ pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
struct percpu_data {
void *ptrs[1];
};
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+ struct percpu_data *__p = __percpu_disguise(ptr); \
+ (__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
#else /* CONFIG_SMP */
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
{
- return kzalloc(size, gfp);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+ return kzalloc(size, GFP_KERNEL);
}
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
{
- kfree(__pdata);
+ kfree(p);
}
#endif /* CONFIG_SMP */
-#define percpu_alloc_mask(size, gfp, mask) \
- __percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
- cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
#endif /* __LINUX_PERCPU_H */
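
Editor's note: on the dynamic side, the second hunk retires the percpu_alloc_mask()/percpu_free() family. __alloc_percpu() now takes an explicit alignment, alloc_percpu(type) forwards __alignof__(type), and per_cpu_ptr() becomes the single accessor for both the new CONFIG_HAVE_DYNAMIC_PER_CPU_AREA allocator (offset arithmetic via SHIFT_PERCPU_PTR) and the legacy percpu_data ptrs[] array. A hedged usage sketch, assuming the new API as merged here; struct demo_stats and the demo_* functions are invented for illustration:

	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* Hypothetical per-cpu statistics object. */
	struct demo_stats {
		unsigned long packets;
		unsigned long bytes;
	};

	static struct demo_stats *demo_stats;	/* per-cpu handle, not a plain pointer */

	static int demo_init(void)
	{
		/* One zeroed copy per CPU, aligned to
		 * __alignof__(struct demo_stats). */
		demo_stats = alloc_percpu(struct demo_stats);
		if (!demo_stats)
			return -ENOMEM;
		return 0;
	}

	static void demo_account(unsigned long len)
	{
		/* Pin the CPU while touching its copy, as the header
		 * comment above per_cpu_ptr() recommends. */
		struct demo_stats *s = per_cpu_ptr(demo_stats, get_cpu());

		s->packets++;
		s->bytes += len;
		put_cpu();
	}

	static void demo_exit(void)
	{
		free_percpu(demo_stats);
	}

Note the UP fallback in the hunk above: __alloc_percpu() degrades to kzalloc() and warns once if the requested alignment exceeds SMP_CACHE_BYTES, since kmalloc cannot honour larger alignments on that path.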