* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
-static unsigned long calc_range_end(unsigned long start, unsigned long end)
+static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
 * Limit the number of cache lines processed at once,
 * since cache range operations stall the CPU pipeline
 * until completion.
 */
return end;
}
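The helper body is unchanged, so the hunk elides it. For reference, it clamps
each batch in two ways; a sketch of that logic, assuming MAX_RANGE_SIZE comes
from the Aurora cache header:

        /* Cap the batch so a single range operation cannot stall
         * the CPU pipeline for too long. */
        if (end > start + MAX_RANGE_SIZE)
                end = start + MAX_RANGE_SIZE;

        /* The hardware only TLB-translates the start address, so a
         * batch must never cross a page boundary. */
        if (end > PAGE_ALIGN(start + 1))
                end = PAGE_ALIGN(start + 1);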
-/*
- * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
- * and range operations only do a TLB lookup on the start address.
- */
static void aurora_pa_range(unsigned long start, unsigned long end,
- unsigned long offset)
+ unsigned long offset)
{
void __iomem *base = l2x0_base;
+ unsigned long range_end;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
- writel_relaxed(end, base + offset);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-
- writel_relaxed(0, base + AURORA_SYNC_REG);
-}
-
-static void aurora_inv_range(unsigned long start, unsigned long end)
-{
/*
 * Align the start address down, and the end address up, to the
 * cache line size.
 */
start &= ~(CACHE_LINE_SIZE - 1);
end = ALIGN(end, CACHE_LINE_SIZE);
/*
- * Invalidate all full cache lines between 'start' and 'end'.
+ * Perform the cache operation on all full cache lines between 'start' and 'end'.
*/
while (start < end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
+ range_end = aurora_range_end(start, end);
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
+ writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+ writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
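To see how the rewritten loop batches a request, here is a minimal user-space
model of the logic above. It is a sketch, not kernel code: the CACHE_LINE_SIZE,
PAGE_SIZE and MAX_RANGE_SIZE values are assumptions picked for illustration.

#include <stdio.h>

#define CACHE_LINE_SIZE 32UL
#define PAGE_SIZE       4096UL
#define MAX_RANGE_SIZE  1024UL

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x)   ALIGN((x), PAGE_SIZE)

static unsigned long range_end(unsigned long start, unsigned long end)
{
        /* Cap the batch, then stop it at the next page boundary. */
        if (end > start + MAX_RANGE_SIZE)
                end = start + MAX_RANGE_SIZE;
        if (end > PAGE_ALIGN(start + 1))
                end = PAGE_ALIGN(start + 1);
        return end;
}

int main(void)
{
        unsigned long start = 0x1f80, end = 0x2100;

        start &= ~(CACHE_LINE_SIZE - 1);
        end = ALIGN(end, CACHE_LINE_SIZE);

        while (start < end) {
                unsigned long batch = range_end(start, end);

                /* Hardware bounds are inclusive, hence '- CACHE_LINE_SIZE'. */
                printf("op on [%#lx, %#lx]\n", start, batch - CACHE_LINE_SIZE);
                start = batch;
        }
        return 0;
}

For [0x1f80, 0x2100) this prints the two inclusive ranges [0x1f80, 0x1fe0] and
[0x2000, 0x20e0]: the first batch stops at the 0x2000 page boundary, and each
end address drops one cache line because the hardware takes inclusive bounds.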
+static void aurora_inv_range(unsigned long start, unsigned long end)
+{
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+}
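These wrappers become the driver's outer-cache callbacks; callers reach them
through helpers such as outer_inv_range(). The dispatch in
arch/arm/include/asm/outercache.h looks roughly like this (reproduced from
memory, check your tree):

static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
        if (outer_cache.inv_range)
                outer_cache.inv_range(start, end);
}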
static void aurora_clean_range(unsigned long start, unsigned long end)
{
/*
 * If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
- if (!l2_wt_override) {
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_CLEAN_RANGE_REG);
- start = range_end;
- }
- }
+ if (!l2_wt_override)
+ aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- /*
- * If L2 is forced to WT, the L2 will always be clean and we
- * just need to invalidate.
- */
- if (l2_wt_override)
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
- else
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_FLUSH_RANGE_REG);
- start = range_end;
- }
+ if (l2_wt_override)
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+ else
+ aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
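For completeness, the three range callbacks are wired into the Aurora init
data in cache-l2x0.c. Roughly, with unrelated fields omitted (names from
memory, so verify against the tree):

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
        /* ... setup callbacks omitted ... */
        .outer_cache = {
                .inv_range   = aurora_inv_range,
                .clean_range = aurora_clean_range,
                .flush_range = aurora_flush_range,
                .flush_all   = aurora_flush_all,
                /* ... */
        },
};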
static void aurora_flush_all(void)