/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

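/*
 * Per-CPU bookkeeping filled in before the common arm_pmu code takes over:
 * probed_pmus is the logical PMU each CPU has been associated with (if any),
 * and pmu_irqs is the interrupt parsed from that CPU's MADT GICC entry
 * (zero when the entry advertises no PMU interrupt).
 */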
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

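/* Undo a successful acpi_register_gsi() for a CPU's PMU interrupt. */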
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}

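/*
 * Parse the PMU interrupt for each possible CPU from the MADT, registering
 * the GSIs up front. On failure, unwind any registrations already made,
 * taking care to unregister each shared GSI (e.g. a PPI) exactly once.
 */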
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		per_cpu(pmu_irqs, cpu) = irq;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

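/*
 * Find the logical PMU instance matching the current CPU's MIDR, allocating
 * a fresh one if no previously-probed CPU shares it. This must run on the
 * CPU in question, since it reads the local MIDR via read_cpuid_id().
 */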
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	per_cpu(probed_pmus, cpu) = pmu;

	/*
	 * Log and request the IRQ so the core arm_pmu code can manage it. In
	 * some situations (e.g. mismatched PPIs), we may fail to request the
	 * IRQ. However, it may be too late for us to do anything about it.
	 * The common ARM PMU code will log a warning in this case.
	 */
	hw_events = pmu->hw_events;
	per_cpu(hw_events->irq, cpu) = irq;
	armpmu_request_irq(pmu, cpu);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

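/*
 * Initialise, name, and register each logical PMU probed so far, using the
 * driver-supplied init_fn for the implementation-specific parts. Each PMU
 * is given a unique "<base name>_<index>" name.
 */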
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	if (acpi_disabled)
		return 0;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	/*
	 * We can't request IRQs yet, since we don't know the cookie value
	 * until we know which CPUs share the same logical PMU. We'll handle
	 * that in arm_pmu_acpi_cpu_starting().
	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)