VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 68
+ SUBLEVEL = 69
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -fno-delete-null-pointer-checks
+ -fno-delete-null-pointer-checks \
+ -w
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H
-#define ID_MIDR_EL1 "midr_el1"
-#define ID_MPIDR_EL1 "mpidr_el1"
-#define ID_CTR_EL0 "ctr_el0"
+#define INVALID_HWID ULONG_MAX
-#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
-#define INVALID_HWID ULONG_MAX
+#define MPIDR_MT_BITMASK (0x1 << 24)
#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
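For illustration, the macros above place affinity level n at bit offset 0, 8, 16 and 32 for n = 0..3. A minimal standalone sketch (not part of the patch) decoding a made-up MPIDR value:

#include <stdio.h>

#define MPIDR_LEVEL_BITS_SHIFT	3
#define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) \
	(((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

int main(void)
{
	/* Hypothetical MPIDR_EL1 value: Aff3=0x1, Aff2=0, Aff1=0x2, Aff0=0x1 */
	unsigned long long mpidr = 0x0000000100000201ULL;

	/* Prints "Aff0=0x1 Aff1=0x2 Aff2=0 Aff3=0x1" */
	printf("Aff0=%#llx Aff1=%#llx Aff2=%#llx Aff3=%#llx\n",
	       MPIDR_AFFINITY_LEVEL(mpidr, 0),
	       MPIDR_AFFINITY_LEVEL(mpidr, 1),
	       MPIDR_AFFINITY_LEVEL(mpidr, 2),
	       MPIDR_AFFINITY_LEVEL(mpidr, 3));
	return 0;
}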
#define read_cpuid(reg) ({ \
u64 __val; \
- asm("mrs %0, " reg : "=r" (__val)); \
+ asm("mrs %0, " #reg : "=r" (__val)); \
__val; \
})
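With the stringification change (#reg), callers can pass the system register name as a bare token; for example, read_cpuid(MIDR_EL1) expands roughly to the following (preprocessor sketch, not part of the patch):

({
	u64 __val;
	asm("mrs %0, " "MIDR_EL1" : "=r" (__val));	/* #reg pastes the token as a string literal */
	__val;
})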
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_PART_AEM_V8 0xD0F0
#define ARM_CPU_PART_FOUNDATION 0xD000
+#define ARM_CPU_PART_CORTEX_A53 0xD030
#define ARM_CPU_PART_CORTEX_A57 0xD070
+#define APM_CPU_PART_POTENZA 0x0000
+
#ifndef __ASSEMBLY__
/*
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(ID_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(ID_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(ID_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
+ void cpuinfo_store_cpu(void);
+
#endif /* __ASSEMBLY__ */
#endif
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+ #include <linux/personality.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
printk("%s", buf);
}
- static void __init setup_processor(void)
+ struct cpuinfo_arm64 {
+ struct cpu cpu;
+ u32 reg_midr;
+ };
+
+ static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+ void cpuinfo_store_cpu(void)
+ {
+ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+ info->reg_midr = read_cpuid_id();
+ }
+
+ void __init smp_setup_processor_id(void)
{
- struct cpu_info *cpu_info;
+ /*
+ * clear __my_cpu_offset on the boot CPU to avoid a hang caused by
+ * using percpu variables too early; for example, lockdep will
+ * access a percpu variable inside lock_release().
+ */
+ set_my_cpu_offset(0);
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ * level in order to build a linear index from an
+ * MPIDR value. The resulting algorithm is a
+ * collision-free hash carried out through shifting and ORing.
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
+ /*
+ * Pre-scan the list of MPIDRs and filter out bits that do
+ * not contribute to affinity levels, i.e. they never toggle.
+ */
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
+ /*
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
+ */
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+ * Find the MSB and LSB bit positions to
+ * determine how many bits are required to
+ * express the affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
/*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+ * them in order to compress the 32-bit value space to a
+ * smaller, dense set of values. This is equivalent to hashing
+ * the MPIDR_EL1 through shifting and ORing. It is a collision-free
+ * hash, though not minimal, since some levels might contain a number
+ * of CPUs that is not an exact power of 2 and their bit
+ * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
*/
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+}
+#endif
+
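To make the hash concrete, here is a standalone sketch (not part of the patch) of roughly how a consumer of mpidr_hash folds an MPIDR into a dense index, using the values smp_build_mpidr_hash() would derive for a hypothetical four-CPU system with MPIDRs {0x0, 0x1, 0x100, 0x101} (mask 0x101, shift_aff {0, 7, 14, 30}); the constants and helper name are illustrative only:

#include <stdio.h>

static unsigned int mpidr_to_index(unsigned long long mpidr)
{
	/* Hypothetical pre-computed values for MPIDRs {0x0, 0x1, 0x100, 0x101} */
	const unsigned long long mask = 0x101;
	const unsigned int shift_aff[4] = { 0, 7, 14, 30 };
	/* Bit offsets of Aff0..Aff3, i.e. MPIDR_LEVEL_SHIFT(0..3) */
	const unsigned int field_shift[4] = { 0, 8, 16, 32 };
	unsigned long long idx = 0;
	unsigned int level;

	mpidr &= mask;	/* drop bits that never toggle across CPUs */
	for (level = 0; level < 4; level++)
		idx |= (mpidr & (0xffULL << field_shift[level])) >> shift_aff[level];
	return (unsigned int)idx;
}

int main(void)
{
	const unsigned long long cpus[] = { 0x0, 0x1, 0x100, 0x101 };
	unsigned int i;

	/* Prints "0 1 2 3": collision-free and dense, as the comment above describes */
	for (i = 0; i < 4; i++)
		printf("%u ", mpidr_to_index(cpus[i]));
	printf("\n");
	return 0;
}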
+static void __init setup_processor(void)
+{
+ struct cpu_info *cpu_info;
+ u64 features, block;
+
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
printk("CPU configuration botched (ID %08x), unable to continue.\n",
sprintf(init_utsname()->machine, "aarch64");
elf_hwcap = 0;
+
+ /*
+ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+ * The blocks we test below represent incremental functionality
+ * for non-negative values. Negative values are reserved.
+ */
+ features = read_cpuid(ID_AA64ISAR0_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_PMULL;
+ case 1:
+ elf_hwcap |= HWCAP_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
}
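As a worked example of the decoding above (hypothetical register value, not taken from the patch): a CPU reporting ID_AA64ISAR0_EL1 = 0x11120 has AES=2, SHA1=1, SHA2=1 and CRC32=1, so the switch falls through from case 2 and the CPU ends up with HWCAP_PMULL, HWCAP_AES, HWCAP_SHA1, HWCAP_SHA2 and HWCAP_CRC32:

#include <stdio.h>

int main(void)
{
	unsigned long long features = 0x11120;	/* hypothetical ID_AA64ISAR0_EL1 */

	printf("aes=%llu sha1=%llu sha2=%llu crc32=%llu\n",
	       (features >> 4) & 0xf,	/* 2: case 2 falls through, PMULL + AES */
	       (features >> 8) & 0xf,	/* 1: SHA1 */
	       (features >> 12) & 0xf,	/* 1: SHA2 */
	       (features >> 16) & 0xf);	/* 1: CRC32 */
	return 0;
}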
static void __init setup_machine_fdt(phys_addr_t dt_phys)
struct boot_param_header *devtree;
unsigned long dt_root;
+ cpuinfo_store_cpu();
+
/* Check we have a non-NULL DT pointer */
if (!dt_phys) {
early_print("\n"
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
+ smp_build_mpidr_hash();
#endif
#ifdef CONFIG_VT
conswitchp = &dummy_con;
#endif
#endif
+
}
static int __init arm64_device_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
- static DEFINE_PER_CPU(struct cpu, cpu_data);
-
static int __init topology_init(void)
{
int i;
for_each_possible_cpu(i) {
- struct cpu *cpu = &per_cpu(cpu_data, i);
+ struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
cpu->hotpluggable = 1;
register_cpu(cpu, i);
}
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
NULL
};
+ #ifdef CONFIG_COMPAT
+ static const char *compat_hwcap_str[] = {
+ "swp",
+ "half",
+ "thumb",
+ "26bit",
+ "fastmult",
+ "fpa",
+ "vfp",
+ "edsp",
+ "java",
+ "iwmmxt",
+ "crunch",
+ "thumbee",
+ "neon",
+ "vfpv3",
+ "vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm"
+ };
+ #endif /* CONFIG_COMPAT */
+
static int c_show(struct seq_file *m, void *v)
{
- int i;
-
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+ int i, j;
for_each_online_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+ u32 midr = cpuinfo->reg_midr;
+
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
loops_per_jiffy / (5000UL/HZ) % 100);
- }
- /* dump out the processor features */
- seq_puts(m, "Features\t: ");
-
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, "%s ", hwcap_str[i]);
- #ifdef CONFIG_ARMV7_COMPAT_CPUINFO
- if (is_compat_task()) {
- /* Print out the non-optional ARMv8 HW capabilities */
- seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
- seq_printf(m, "vfpv4 idiva idivt ");
- }
- #endif
-
- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
- seq_printf(m, "CPU architecture: %s\n",
- #if IS_ENABLED(CONFIG_ARMV7_COMPAT_CPUINFO)
- is_compat_task() ? "8" :
- #endif
- "AArch64");
- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
-
- seq_puts(m, "\n");
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+ if (personality(current->personality) == PER_LINUX32) {
+ #ifdef CONFIG_COMPAT
+ for (j = 0; compat_hwcap_str[j]; j++)
+ if (COMPAT_ELF_HWCAP & (1 << j))
+ seq_printf(m, " %s", compat_hwcap_str[j]);
+ #endif /* CONFIG_COMPAT */
+ } else {
+ for (j = 0; hwcap_str[j]; j++)
+ if (elf_hwcap & (1 << j))
+ seq_printf(m, " %s", hwcap_str[j]);
+ }
+ seq_puts(m, "\n");
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
+ seq_printf(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
+ seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
+ seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
+ }
return 0;
}
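For reference, with these changes a 64-bit task on a hypothetical single-cluster Cortex-A57 system would see per-CPU /proc/cpuinfo entries along these lines (illustrative values only); a 32-bit (PER_LINUX32) task gets the compat_hwcap_str names instead:

processor	: 0
BogoMIPS	: 100.00
Features	: fp asimd evtstrm aes pmull sha1 sha2 crc32
CPU implementer	: 0x41
CPU architecture: 8
CPU variant	: 0x0
CPU part	: 0xd07
CPU revision	: 0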
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#include <linux/mt_sched_mon.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CPU_STOP,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
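For context, cpu_ops[] points at a per-cpu set of enable-method callbacks (spin-table, PSCI, ...); the following is a minimal sketch of the operations structure as implied by the call sites in this patch (field list inferred from the callers, not copied from asm/cpu_ops.h):

struct device_node;

struct cpu_operations {
	const char	*name;				/* matched against the DT enable-method */
	int		(*cpu_init)(struct device_node *, unsigned int);
	int		(*cpu_prepare)(unsigned int);
	int		(*cpu_boot)(unsigned int);
	void		(*cpu_postboot)(void);
	int		(*cpu_disable)(unsigned int);	/* optional veto for hot unplug */
	void		(*cpu_die)(unsigned int);	/* never returns */
	int		(*cpu_kill)(unsigned int);	/* confirm the CPU left the kernel */
};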
static DECLARE_COMPLETION(cpu_running);
return ret;
}
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+ store_cpu_topology(cpuid);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
+ /*
+ * Log the CPU info before it is marked online and might get read.
+ */
+ cpuinfo_store_cpu();
+
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
set_cpu_online(cpu, true);
complete(&cpu_running);
+ smp_store_cpu_info(cpu);
+
/*
* Enable GIC and timers.
*/
notify_cpu_starting(cpu);
+ local_dbg_enable();
local_irq_enable();
local_fiq_enable();
cpu_startup_entry(CPUHP_ONLINE);
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
{
- unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have a cpu_ops entry, so test for it.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
- pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- num_online_cpus(), bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
}
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
{
-}
+ unsigned int cpu = smp_processor_id();
+ int ret;
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+ return 0;
+}
+
+static int op_cpu_kill(unsigned int cpu)
{
- const struct smp_enable_ops **ops = enable_ops;
+ /*
+ * If we have no means of synchronising with the dying CPU, then assume
+ * that it is really dead. We can only wait for an arbitrary length of
+ * time and hope that it's dead, so let's skip the wait and just hope.
+ */
+ if (!cpu_ops[cpu]->cpu_kill)
+ return 1;
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+ return cpu_ops[cpu]->cpu_kill(cpu);
+}
+
+static DECLARE_COMPLETION(cpu_died);
- ops++;
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or it times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
}
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+ * Now that the dying CPU is beyond the point of no return w.r.t.
+ * in-kernel synchronisation, try to get the firmware to help us to
+ * verify that it has really left the kernel before we consider
+ * clobbering anything it might still be using.
+ */
+ if (!op_cpu_kill(cpu))
+ pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
+
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+ * Actually shut down the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
- return NULL;
+ BUG();
+}
+#endif
+
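Putting the pieces above together, the hot-unplug flow introduced here is roughly the following (sketch, not part of the patch):

/*
 *  requesting CPU                         dying CPU
 *  --------------                         ---------
 *  cpu_down()
 *    -> __cpu_disable()  .............    runs on the dying CPU via stop_machine:
 *                                         op_cpu_disable(), set_cpu_online(false),
 *                                         migrate_irqs(), clear_tasks_mm_cpumask()
 *    -> __cpu_die()                       idle loop calls cpu_die():
 *         waits for &cpu_died  <......    complete(&cpu_died), then
 *         then op_cpu_kill() asks the     cpu_ops[cpu]->cpu_die(), which
 *         enable method / firmware to     never returns
 *         confirm the CPU really left
 *         the kernel
 */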
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+
+ pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(), bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
/*
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
-
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
- goto next;
- }
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
+ init_cpu_topology();
+
+ smp_store_cpu_info(smp_processor_id());
+
/*
* are we trying to boot more cores than exist?
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
}
}
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- smp_cross_call = fn;
+ __smp_cross_call = fn;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_present_cpu(cpu)
seq_printf(p, "%10u ",
return sum;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
- __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+ if ((unsigned)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
+ __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_RESCHEDULE:
case IPI_CALL_FUNC:
irq_enter();
+ mt_trace_ISR_start(ipinr);
generic_smp_call_function_interrupt();
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
case IPI_CALL_FUNC_SINGLE:
irq_enter();
+ mt_trace_ISR_start(ipinr);
generic_smp_call_function_single_interrupt();
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
case IPI_CPU_STOP:
irq_enter();
+ mt_trace_ISR_start(ipinr);
ipi_cpu_stop(cpu);
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
*/
static int desc_to_gpio(const struct gpio_desc *desc)
{
- return desc->chip->base + gpio_chip_hwgpio(desc);
+ return desc - &gpio_desc[0];
}
if (tdev != NULL) {
status = sysfs_create_link(&dev->kobj, &tdev->kobj,
name);
+ put_device(tdev);
} else {
status = -ENODEV;
}
}
status = sysfs_set_active_low(desc, dev, value);
-
+ put_device(dev);
unlock:
mutex_unlock(&sysfs_lock);
}
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&chip->pin_ranges);
#endif
of_gpiochip_add(chip);
-unlock:
- spin_unlock_irqrestore(&gpio_lock, flags);
-
if (status)
goto fail;
chip->label ? : "generic");
return 0;
+
+unlock:
+ spin_unlock_irqrestore(&gpio_lock, flags);
fail:
/* failures here can mean systems won't boot... */
pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
ti->ti_count = 0;
ti->ti_save = cur_ti;
ti->ti_magic = NILFS_TI_MAGIC;
- INIT_LIST_HEAD(&ti->ti_garbage);
current->journal_info = ti;
for (;;) {
up_write(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
- if (!list_empty(&ti->ti_garbage))
- nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
}
}
+ static void nilfs_iput_work_func(struct work_struct *work)
+ {
+ struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+ sc_iput_work);
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+ }
+
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
struct nilfs_root *root)
{
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_transaction_info *ti = current->journal_info;
struct nilfs_inode_info *ii, *n;
+ int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
clear_bit(NILFS_I_BUSY, &ii->i_state);
brelse(ii->i_bh);
ii->i_bh = NULL;
- list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+ list_del_init(&ii->i_dirty);
+ if (!ii->vfs_inode.i_nlink) {
+ /*
+ * Defer calling iput() to avoid a deadlock
+ * over I_SYNC flag for inodes with i_nlink == 0
+ */
+ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ defer_iput = true;
+ } else {
+ spin_unlock(&nilfs->ns_inode_lock);
+ iput(&ii->vfs_inode);
+ spin_lock(&nilfs->ns_inode_lock);
+ }
}
spin_unlock(&nilfs->ns_inode_lock);
+
+ if (defer_iput)
+ schedule_work(&sci->sc_iput_work);
}
/*
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int timeout = 0;
+ set_freezable();
+
sci->sc_timer.data = (unsigned long)current;
sci->sc_timer.function = nilfs_construction_timeout;
INIT_LIST_HEAD(&sci->sc_segbufs);
INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ INIT_LIST_HEAD(&sci->sc_iput_queue);
+ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
init_timer(&sci->sc_timer);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sci->sc_super);
+ flush_work(&sci->sc_iput_work);
+
} while (ret && retrycount-- > 0);
}
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
+ if (flush_work(&sci->sc_iput_work))
+ flag = true;
+
if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
+ if (!list_empty(&sci->sc_iput_queue)) {
+ nilfs_warning(sci->sc_super, __func__,
+ "iput queue is not empty\n");
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+ }
+
WARN_ON(!list_empty(&sci->sc_segbufs));
WARN_ON(!list_empty(&sci->sc_write_logs));