From: Linus Torvalds
Date: Mon, 14 Dec 2009 17:58:24 +0000 (-0800)
Subject: Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=d0316554d3586cbea60592a41391b5def2553d6f;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
---
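A note on the recurring shape of the this_cpu conversions, since it repeats
across several of the 34 commits above: the open-coded sequence of
preempt_disable(), per_cpu_ptr() deref, preempt_enable() becomes a single
this_cpu operation (see the NEIGH_CACHE_STAT_INC hunk below). The
side-by-side sketch here is illustrative only and is not code from this
merge; the struct, the "pkt_stats" pointer and the "rx_count" field are
invented stand-ins, and pkt_stats is assumed to have been set up with
alloc_percpu() at init time.

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

struct pkt_stats {
	unsigned long rx_count;
};

/* invented example; assumed: pkt_stats = alloc_percpu(struct pkt_stats) */
static struct pkt_stats *pkt_stats;

static void count_rx_old(void)
{
	/* pre-series shape: fence preemption by hand around the deref */
	preempt_disable();
	per_cpu_ptr(pkt_stats, smp_processor_id())->rx_count++;
	preempt_enable();
}

static void count_rx_new(void)
{
	/* post-series shape: one preemption-safe this_cpu operation */
	this_cpu_inc(pkt_stats->rx_count);
}

The this_cpu form folds the CPU addressing and the preemption fencing into
one operation; on x86 it compiles down to a segment-prefixed increment, so
it is both shorter and cheaper than the open-coded sequence.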
d0316554d3586cbea60592a41391b5def2553d6f
diff --cc arch/x86/kvm/svm.c
index 3de0b37ec038,6c79a14a3b6f..1d9b33843c80
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@@ -313,46 -316,40 +313,45 @@@ static void svm_hardware_disable(void *
  	cpu_svm_disable();
  }
  
 -static void svm_hardware_enable(void *garbage)
 +static int svm_hardware_enable(void *garbage)
  {
- 	struct svm_cpu_data *svm_data;
+ 	struct svm_cpu_data *sd;
  	uint64_t efer;
  	struct descriptor_table gdt_descr;
  	struct desc_struct *gdt;
  	int me = raw_smp_processor_id();
  
 +	rdmsrl(MSR_EFER, efer);
 +	if (efer & EFER_SVME)
 +		return -EBUSY;
 +
  	if (!has_svm()) {
 -		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
 -		return;
 +		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
 +		       me);
 +		return -EINVAL;
  	}
- 	svm_data = per_cpu(svm_data, me);
+ 	sd = per_cpu(svm_data, me);
  
- 	if (!svm_data) {
+ 	if (!sd) {
 -		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
 +		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
  		       me);
 -		return;
 +		return -EINVAL;
  	}
  
- 	svm_data->asid_generation = 1;
- 	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
- 	svm_data->next_asid = svm_data->max_asid + 1;
+ 	sd->asid_generation = 1;
+ 	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ 	sd->next_asid = sd->max_asid + 1;
  
  	kvm_get_gdt(&gdt_descr);
  	gdt = (struct desc_struct *)gdt_descr.base;
- 	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+ 	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
  
 -	rdmsrl(MSR_EFER, efer);
  	wrmsrl(MSR_EFER, efer | EFER_SVME);
  
--	wrmsrl(MSR_VM_HSAVE_PA,
 -	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
- 	       page_to_pfn(sd->save_area) << PAGE_SHIFT);
++	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 +
 +	return 0;
  }
  
  static void svm_cpu_uninit(int cpu)
diff --cc arch/x86/xen/time.c
index 9d1f853120d8,26e37b787ad3..0d3f07cd1b5f
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@@ -97,10 -97,10 +97,10 @@@ static void get_runstate_snapshot(struc
  
  /* return true when a vcpu could run but has no real cpu to run on */
  bool xen_vcpu_stolen(int vcpu)
  {
- 	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+ 	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
  }
  
 -static void setup_runstate_info(int cpu)
 +void xen_setup_runstate_info(int cpu)
  {
  	struct vcpu_register_runstate_memory_area area;
  
diff --cc drivers/net/veth.c
index 63099c58a6dd,0c4a81124257..3a15de56df9c
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@@ -153,8 -153,10 +153,8 @@@ static netdev_tx_t veth_xmit(struct sk_
  	struct net_device *rcv = NULL;
  	struct veth_priv *priv, *rcv_priv;
  	struct veth_net_stats *stats, *rcv_stats;
- 	int length, cpu;
+ 	int length;
  
 -	skb_orphan(skb);
 -
  	priv = netdev_priv(dev);
  	rcv = priv->peer;
  	rcv_priv = netdev_priv(rcv);
diff --cc include/net/neighbour.h
index 0302f31a2fb7,f28403ff7648..b0173202cad9
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@@ -88,14 -90,10 +88,9 @@@ struct neigh_statistics
  	unsigned long unres_discards;	/* number of unresolved drops */
  };
  
- #define NEIGH_CACHE_STAT_INC(tbl, field)				\
- 	do {								\
- 		preempt_disable();					\
- 		(per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
- 		preempt_enable();					\
- 	} while (0)
+ #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
  
 -struct neighbour
 -{
 +struct neighbour {
  	struct neighbour	*next;
  	struct neigh_table	*tbl;
  	struct neigh_parms	*parms;
diff --cc kernel/lockdep.c
index 4f8df01dbe51,8631320a50d0..429540c70d3f
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@@ -140,13 -140,9 +140,14 @@@ static inline struct lock_class *hlock_
  }
  
  #ifdef CONFIG_LOCK_STAT
- static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+ static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+ 		      cpu_lock_stats);
  
 +static inline u64 lockstat_clock(void)
 +{
 +	return cpu_clock(smp_processor_id());
 +}
 +
  static int lock_point(unsigned long points[], unsigned long ip)
  {
  	int i;
diff --cc mm/slab.c
index a6c9166996a9,211b1746c63c..29b09599af7c
--- a/mm/slab.c
+++ b/mm/slab.c
@@@ -697,7 -665,27 +697,7 @@@ static inline void init_lock_keys(void
  static DEFINE_MUTEX(cache_chain_mutex);
  static struct list_head cache_chain;
  
- static DEFINE_PER_CPU(struct delayed_work, reap_work);
 -/*
 - * chicken and egg problem: delay the per-cpu array allocation
 - * until the general caches are up.
 - */
 -static enum {
 -	NONE,
 -	PARTIAL_AC,
 -	PARTIAL_L3,
 -	EARLY,
 -	FULL
 -} g_cpucache_up;
 -
 -/*
 - * used by boot code to determine if it can use slab based allocator
 - */
 -int slab_is_available(void)
 -{
 -	return g_cpucache_up >= EARLY;
 -}
 -
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
  
  static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
  {
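A closing note on why the fixed-up naming conflicts exist at all: once the
old per_cpu__ symbol prefix is gone (which is what the "make percpu symbols
unique" renames above prepare for), per-cpu variables live in the ordinary C
namespace, so per_cpu(name, cpu) resolves "name" like any other identifier
and a local variable of the same name shadows the per-cpu symbol inside the
macro expansion. Below is a minimal, hypothetical sketch of the svm.c case;
the struct body is trimmed to one invented field, and only the names
svm_data and sd mirror the real file.

#include <linux/percpu.h>

struct svm_cpu_data {
	int max_asid;	/* invented field, for illustration only */
};

/* per-cpu symbol named "svm_data", as in arch/x86/kvm/svm.c */
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static void lookup(int me)
{
	/*
	 * Naming this local "svm_data" would make the per_cpu()
	 * expansion pick up the local instead of the per-cpu symbol,
	 * which is why the merge fixup renames it to "sd".
	 */
	struct svm_cpu_data *sd = per_cpu(svm_data, me);
	(void)sd;
}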