/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */

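/*
 * This template is compiled more than once: X_PFX, X_STATIC, X_STAT_PFX
 * and the __x_* accessors (__x_tima, __x_readb/w/q, __x_writeb/q,
 * __x_eoi_page, __x_trig_page) are expected to be provided by the
 * including .c file, typically once for the virtual mode and once for
 * the real mode XIVE backend. GLUE() simply pastes the prefix onto each
 * function name.
 */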
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

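/*
 * Acknowledge the pending OS interrupt in the HW thread context and
 * record its priority in xc->pending. Also refresh our cached copy of
 * the HW CPPR, which the ack cycle has updated.
 */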
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
        u8 cppr;
        u16 ack;

        /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */

        /* Perform the acknowledge OS to register cycle. */
        ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /* XXX Check grouping level */

        /* Anything ? */
        if (!((ack >> 8) & TM_QW1_NSR_EO))
                return;

        /* Grab CPPR of the most favored pending interrupt */
        cppr = ack & 0xff;
        if (cppr < 8)
                xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
        /* Check consistency */
        if (cppr >= xc->hw_cppr)
                pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
                        smp_processor_id(), cppr, xc->hw_cppr);
#endif

        /*
         * Update our image of the HW CPPR. We don't yet modify
         * xc->cppr, this will be done as we scan for interrupts
         * in the queues.
         */
        xc->hw_cppr = cppr;
}

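/*
 * Perform a "special" load from an interrupt source's ESB page at the
 * given offset and return the response byte (typically the previous PQ
 * state of the source). The SHIFT_BUG flag mirrors the offset into the
 * next nibble to work around a HW erratum.
 */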
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
        val >>= 64 - 8;
#endif
        return (u8)val;
}

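/*
 * EOI an interrupt source: use the "store EOI" facility when the source
 * supports it, go through firmware (OPAL) when the source requires it,
 * and otherwise do the PQ-based EOI described below.
 */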
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
                __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
        } else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                opal_int_eoi(hw_irq);
        } else {
                uint64_t eoi_val;

                /*
                 * Otherwise for EOI, we use the special MMIO that does
                 * a clear of both P and Q and returns the old Q,
                 * except for LSIs where we use the "EOI cycle" special
                 * load.
                 *
                 * This allows us to then do a re-trigger if Q was set
                 * rather than synthesizing an interrupt in software.
                 *
                 * For LSIs, using the HW EOI cycle works around a problem
                 * on P9 DD1 PHBs where the other ESB accesses don't work
                 * properly.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI) {
                        __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
                } else {
                        eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

                        /* Re-trigger if needed */
                        if ((eoi_val & 1) && __x_trig_page(xd))
                                __x_writeq(0, __x_trig_page(xd));
                }
        }
}

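/*
 * Scan modes for GLUE(X_PFX,scan_interrupts):
 *
 *  scan_fetch: actually consume the interrupt (update the queue
 *              pointers, EOI the IPI source), as done for H_XIRR.
 *  scan_poll:  just peek at the most favored pending interrupt
 *              without consuming anything from the queues, as done
 *              for H_IPOLL.
 *  scan_eoi:   re-evaluate the pending bits without adjusting the
 *              CPPR, as done after H_EOI.
 */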
enum {
        scan_fetch,
        scan_poll,
        scan_eoi,
};

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
                                       u8 pending, int scan_type)
{
        u32 hirq = 0;
        u8 prio = 0xff;

        /* Find highest pending priority */
        while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
                struct xive_q *q;
                u32 idx, toggle;
                __be32 *qpage;

                /*
                 * If pending is 0 this will return 0xff which is what
                 * we want
                 */
                prio = ffs(pending) - 1;

                /*
                 * If the most favored prio we found pending is less
                 * favored than (or equal to) a pending IPI, we return
                 * the IPI instead.
                 *
                 * Note: if pending was 0 and mfrr is 0xff, we will
                 * not spuriously take an IPI because mfrr cannot
                 * then be smaller than cppr.
                 */
                if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
                        prio = xc->mfrr;
                        hirq = XICS_IPI;
                        break;
                }

                /* Don't scan past the guest cppr */
                if (prio >= xc->cppr || prio > 7)
                        break;

                /* Grab queue and pointers */
                q = &xc->queues[prio];
                idx = q->idx;
                toggle = q->toggle;

                /*
                 * Snapshot the queue page. The test further down for EOI
                 * must use the same "copy" that was used by __xive_read_eq
                 * since qpage can be set concurrently and we don't want
                 * to miss an EOI.
                 */
                qpage = READ_ONCE(q->qpage);

skip_ipi:
                /*
                 * Try to fetch from the queue. Will return 0 for a
                 * non-queueing priority (ie, qpage = 0).
                 */
                hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

                /*
                 * If this was a signal for an MFRR change done by
                 * H_IPI we skip it. Additionally, if we were fetching
                 * we EOI it now, thus re-enabling reception of a new
                 * such signal.
                 *
                 * We also need to do that if prio is 0 and we had no
                 * page for the queue. In this case, we have a non-queued
                 * IPI that needs to be EOId.
                 *
                 * This is safe because if we have another pending MFRR
                 * change that wasn't observed above, the Q bit will have
                 * been set and another occurrence of the IPI will trigger.
                 */
                if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
                        if (scan_type == scan_fetch)
                                GLUE(X_PFX,source_eoi)(xc->vp_ipi,
                                                       &xc->vp_ipi_data);
                        /* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
                        WARN_ON(hirq && hirq != XICS_IPI);
#endif
                        if (hirq)
                                goto skip_ipi;
                }

                /* If fetching, update queue pointers */
                if (scan_type == scan_fetch) {
                        q->idx = idx;
                        q->toggle = toggle;
                }

                /* Something found, stop searching */
                if (hirq)
                        break;

                /* Clear the pending bit on the now empty queue */
                pending &= ~(1 << prio);

                /*
                 * Check if the queue count needs adjusting due to
                 * interrupts being moved away.
                 */
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);
                        if (p) {
#ifdef XIVE_RUNTIME_CHECKS
                                WARN_ON(p > atomic_read(&q->count));
#endif
                                atomic_sub(p, &q->count);
                        }
                }
        }

        /* If we are just taking a "peek", do nothing else */
        if (scan_type == scan_poll)
                return hirq;

        /* Update the pending bits */
        xc->pending = pending;

        /*
         * If this is an EOI that's it, no CPPR adjustment done here,
         * all we needed was to clean up the stale pending bits and
         * check if there's anything left.
         */
        if (scan_type == scan_eoi)
                return hirq;

        /*
         * If we found an interrupt, adjust what the guest CPPR should
         * be as if we had just fetched that interrupt from HW.
         */
        if (hirq)
                xc->cppr = prio;
        /*
         * If it was an IPI the HW CPPR might have been lowered too much
         * as the HW interrupt we use for IPIs is routed to priority 0.
         *
         * We re-sync it here.
         */
        if (xc->cppr != xc->hw_cppr) {
                xc->hw_cppr = xc->cppr;
                __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
        }

        return hirq;
}

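/*
 * H_XIRR: acknowledge the most favored pending interrupt. We ack the
 * HW, scan the queues for something to deliver and return the source
 * number together with the previous CPPR in GPR4, XICS style.
 */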
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 old_cppr;
        u32 hirq;

        pr_devel("H_XIRR\n");

        xc->GLUE(X_STAT_PFX,h_xirr)++;

        /* First collect pending bits from HW */
        GLUE(X_PFX,ack_pending)(xc);

        /*
         * Cleanup the old-style bits if needed (they may have been
         * set by a pull or an escalation interrupt).
         */
        if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
                clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
                          &vcpu->arch.pending_exceptions);

        pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
                 xc->pending, xc->hw_cppr, xc->cppr);

        /* Grab previous CPPR and reverse map it */
        old_cppr = xive_prio_to_guest(xc->cppr);

        /* Scan for actual interrupts */
        hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

        pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
                 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
        /* That should never hit */
        if (hirq & 0xff000000)
                pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

        /*
         * XXX We could check if the interrupt is masked here and
         * filter it. If we chose to do so, we would need to do:
         *
         * if (masked) {
         *      lock();
         *      if (masked) {
         *              old_Q = true;
         *              hirq = 0;
         *      }
         *      unlock();
         * }
         */

        /* Return interrupt and old CPPR in GPR4 */
        vcpu->arch.gpr[4] = hirq | (old_cppr << 24);

        return H_SUCCESS;
}

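/*
 * H_IPOLL: like H_XIRR but only peeks at the pending interrupt for the
 * given server, without acknowledging or consuming it.
 */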
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 pending = xc->pending;
        u32 hirq;
        u8 pipr;

        pr_devel("H_IPOLL(server=%ld)\n", server);

        xc->GLUE(X_STAT_PFX,h_ipoll)++;

        /* Grab the target VCPU if not the current one */
        if (xc->server_num != server) {
                vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
                if (!vcpu)
                        return H_PARAMETER;
                xc = vcpu->arch.xive_vcpu;

                /* Scan all priorities */
                pending = 0xff;
        } else {
                /* Grab pending interrupt if any */
                pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
                if (pipr < 8)
                        pending |= 1 << pipr;
        }

        hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

        /* Return interrupt and CPPR in GPR4 */
        vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);

        return H_SUCCESS;
}

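/*
 * Push the most favored pending priority (including a pending IPI
 * signalled via MFRR) back into the HW thread context so that the
 * interrupt gets re-presented to the guest.
 */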
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
        u8 pending, prio;

        pending = xc->pending;
        if (xc->mfrr != 0xff) {
                if (xc->mfrr < 8)
                        pending |= 1 << xc->mfrr;
                else
                        pending |= 0x80;
        }
        if (!pending)
                return;
        prio = ffs(pending) - 1;

        __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

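/*
 * H_CPPR: update the current processor priority. When the new value
 * masks less than the old one, re-push any pending interrupts that have
 * become deliverable.
 */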
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 old_cppr;

        pr_devel("H_CPPR(cppr=%ld)\n", cppr);

        xc->GLUE(X_STAT_PFX,h_cppr)++;

        /* Map CPPR */
        cppr = xive_prio_from_guest(cppr);

        /* Remember old and update SW state */
        old_cppr = xc->cppr;
        xc->cppr = cppr;

        /*
         * If we are masking less, we need to look for pending things
         * to deliver and set the VP pending bits accordingly to trigger
         * a new interrupt, otherwise we might miss MFRR changes for
         * which we have optimized out sending an IPI signal.
         */
        if (cppr > old_cppr)
                GLUE(X_PFX,push_pending_to_hw)(xc);

        /* Apply new CPPR */
        xc->hw_cppr = cppr;
        __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return H_SUCCESS;
}

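/*
 * H_EOI: end of interrupt. Update the CPPR from the XIRR value, EOI the
 * source (unless it is the MFRR-based IPI, which was EOId at fetch
 * time), then re-scan the queues and update the HW CPPR.
 */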
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_irq_data *xd;
        u8 new_cppr = xirr >> 24;
        u32 irq = xirr & 0x00ffffff, hw_num;
        u16 src;
        int rc = 0;

        pr_devel("H_EOI(xirr=%08lx)\n", xirr);

        xc->GLUE(X_STAT_PFX,h_eoi)++;

        xc->cppr = xive_prio_from_guest(new_cppr);

        /*
         * IPIs are synthesized from MFRR and thus don't need
         * any special EOI handling. The underlying interrupt
         * used to signal MFRR changes is EOId when fetched from
         * the queue.
         */
        if (irq == XICS_IPI || irq == 0)
                goto bail;

        /* Find interrupt source */
        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel(" source not found !\n");
                rc = H_PARAMETER;
                goto bail;
        }
        state = &sb->irq_state[src];
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        state->in_eoi = true;
        mb();

again:
        if (state->guest_priority == MASKED) {
                arch_spin_lock(&sb->lock);
                if (state->guest_priority != MASKED) {
                        arch_spin_unlock(&sb->lock);
                        goto again;
                }
                pr_devel(" EOI on saved P...\n");

                /* Clear old_p, that will cause unmask to perform an EOI */
                state->old_p = false;

                arch_spin_unlock(&sb->lock);
        } else {
                pr_devel(" EOI on source...\n");

                /* Perform EOI on the source */
                GLUE(X_PFX,source_eoi)(hw_num, xd);

                /* If it's an emulated LSI, check level and resend */
                if (state->lsi && state->asserted)
                        __x_writeq(0, __x_trig_page(xd));
        }

        mb();
        state->in_eoi = false;
bail:

        /* Re-evaluate pending IRQs and update HW */
        GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
        GLUE(X_PFX,push_pending_to_hw)(xc);
        pr_devel(" after scan pending=%02x\n", xc->pending);

        /* Apply new CPPR */
        xc->hw_cppr = xc->cppr;
        __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return rc;
}

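/*
 * H_IPI: set the target's MFRR and fire the IPI source if the new value
 * is more favored than the target's current CPPR.
 */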
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                               unsigned long mfrr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

        xc->GLUE(X_STAT_PFX,h_ipi)++;

        /* Find target */
        vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
        if (!vcpu)
                return H_PARAMETER;
        xc = vcpu->arch.xive_vcpu;

        /* Locklessly write over MFRR */
        xc->mfrr = mfrr;

        /* Shoot the IPI if it is more favored than the target CPPR */
        if (mfrr < xc->cppr)
                __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

        return H_SUCCESS;
}