
Commit bd12976

ARC: cacheflush refactor #3: Unify the {d,i}cache flush leaf helpers
With line length being constant now, we can fold the 2 helpers into 1. This allows applying any optimizations (forthcoming) to a single place.

Signed-off-by: Vineet Gupta <[email protected]>
1 parent 63d2dfd commit bd12976
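
For orientation, here is a minimal sketch (not part of the commit) of how the unified leaf helper is meant to be driven: the cacheop argument selects the aux registers, so the D-cache and I-cache paths share one line loop. The OP_* values and __cache_line_loop() come from the diff below; example_flush_page() is purely hypothetical, and real callers go through the __dc_line_op()/__ic_line_inv_vaddr() wrappers, which also take care of IRQ masking and waiting for the flush to complete.

/* Illustrative only: a hypothetical direct caller of the unified helper */
static void example_flush_page(unsigned long paddr, unsigned long vaddr)
{
        /* wback + discard the page's D$ lines (selects the DC aux regs) */
        __cache_line_loop(paddr, vaddr, PAGE_SIZE, OP_FLUSH_N_INV);

        /* discard the matching I$ lines (OP_INV_IC selects the IC aux regs) */
        __cache_line_loop(paddr, vaddr, PAGE_SIZE, OP_INV_IC);
}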

File tree

1 file changed: +55 -84 lines

arch/arc/mm/cache_arc700.c

@@ -240,6 +240,59 @@ void arc_cache_init(void)
 #define OP_INV          0x1
 #define OP_FLUSH        0x2
 #define OP_FLUSH_N_INV  0x3
+#define OP_INV_IC       0x4
+
+/*
+ * Common Helper for Line Operations on {I,D}-Cache
+ */
+static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
+                                     unsigned long sz, const int cacheop)
+{
+        unsigned int aux_cmd, aux_tag;
+        int num_lines;
+
+        if (cacheop == OP_INV_IC) {
+                aux_cmd = ARC_REG_IC_IVIL;
+                aux_tag = ARC_REG_IC_PTAG;
+        }
+        else {
+                /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+                aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+                aux_tag = ARC_REG_DC_PTAG;
+        }
+
+        /* Ensure we properly floor/ceil the non-line aligned/sized requests
+         * and have @paddr - aligned to cache line and integral @num_lines.
+         * This however can be avoided for page sized since:
+         *  -@paddr will be cache-line aligned already (being page aligned)
+         *  -@sz will be integral multiple of line size (being page sized).
+         */
+        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+                sz += paddr & ~CACHE_LINE_MASK;
+                paddr &= CACHE_LINE_MASK;
+                vaddr &= CACHE_LINE_MASK;
+        }
+
+        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+        /* MMUv2 and before: paddr contains stuffed vaddrs bits */
+        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
+        while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+                /* MMUv3, cache ops require paddr separately */
+                write_aux_reg(aux_tag, paddr);
+
+                write_aux_reg(aux_cmd, vaddr);
+                vaddr += L1_CACHE_BYTES;
+#else
+                write_aux_reg(aux_cmd, paddr);
+#endif
+                paddr += L1_CACHE_BYTES;
+        }
+}
 
 #ifdef CONFIG_ARC_HAS_DCACHE
 
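To make the floor/ceil comment in the new helper concrete, here is a worked example (illustrative only; it assumes a 64-byte L1 line, i.e. CACHE_LINE_MASK == ~63UL, which this hunk does not state):

/* Worked example of the floor/ceil alignment above (illustrative) */
static int example_num_lines(void)
{
        unsigned long paddr = 0x80001234UL;     /* not line aligned */
        unsigned long sz = 0x50;                /* not a multiple of the line size */

        sz += paddr & ~CACHE_LINE_MASK;         /* 0x50 + 0x34 = 0x84 */
        paddr &= CACHE_LINE_MASK;               /* 0x80001200 */

        /* ceil(0x84 / 64) = 3: lines 0x...1200, 0x...1240 and 0x...1280
         * cover every byte of the original [0x80001234, 0x80001284) request */
        return DIV_ROUND_UP(sz, L1_CACHE_BYTES);
}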
@@ -289,55 +342,6 @@ static inline void __dc_entire_op(const int cacheop)
         write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
 }
 
-/*
- * Per Line Operation on D-Cache
- * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
- * It's sole purpose is to help gcc generate ZOL
- * (aliasing VIPT dcache flushing needs both vaddr and paddr)
- */
-static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
-                                  unsigned long sz, const int cacheop)
-{
-        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
-        const int aux = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
-        int num_lines;
-
-        /* Ensure we properly floor/ceil the non-line aligned/sized requests
-         * and have @paddr - aligned to cache line and integral @num_lines.
-         * This however can be avoided for page sized since:
-         *  -@paddr will be cache-line aligned already (being page aligned)
-         *  -@sz will be integral multiple of line size (being page sized).
-         */
-        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                sz += paddr & ~CACHE_LINE_MASK;
-                paddr &= CACHE_LINE_MASK;
-                vaddr &= CACHE_LINE_MASK;
-        }
-
-        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
-
-        while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-                /*
-                 * Just as for I$, in MMU v3, D$ ops also require
-                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                 */
-                write_aux_reg(ARC_REG_DC_PTAG, paddr);
-
-                write_aux_reg(aux, vaddr);
-                vaddr += L1_CACHE_BYTES;
-#else
-                /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(aux, paddr);
-#endif
-                paddr += L1_CACHE_BYTES;
-        }
-}
-
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)       __dc_line_op(p, p, sz, op)
 
@@ -362,7 +366,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                 write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
         }
 
-        __dc_line_loop(paddr, vaddr, sz, cacheop);
+        __cache_line_loop(paddr, vaddr, sz, cacheop);
 
         if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                 wait_for_flush();
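
As a usage sketch for the D$ side (hypothetical buffer, not from the commit): for kernel 1:1 mappings the __dc_line_op_k() macro kept in the previous hunk passes paddr as both index and tag, and, per the lines above, OP_FLUSH and OP_FLUSH_N_INV wait for the writeback to drain while a plain OP_INV does not.

/* Hypothetical illustration of driving the D$ wrapper kept by this commit */
static void example_dcache_maintenance(unsigned long buf_paddr, unsigned long len)
{
        /* writeback dirty lines so memory is current (waits via wait_for_flush) */
        __dc_line_op_k(buf_paddr, len, OP_FLUSH);

        /* later: discard the lines so the next read refetches from memory */
        __dc_line_op_k(buf_paddr, len, OP_INV);
}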
@@ -434,42 +438,9 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                 unsigned long sz)
 {
         unsigned long flags;
-        int num_lines;
-
-        /*
-         * Ensure we properly floor/ceil the non-line aligned/sized requests:
-         * However page sized flushes can be compile time optimised.
-         *  -@paddr will be cache-line aligned already (being page aligned)
-         *  -@sz will be integral multiple of line size (being page sized).
-         */
-        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                sz += paddr & ~CACHE_LINE_MASK;
-                paddr &= CACHE_LINE_MASK;
-                vaddr &= CACHE_LINE_MASK;
-        }
-
-        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
 
         local_irq_save(flags);
-        while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-                /* tag comes from phy addr */
-                write_aux_reg(ARC_REG_IC_PTAG, paddr);
-
-                /* index bits come from vaddr */
-                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
-                vaddr += L1_CACHE_BYTES;
-#else
-                /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(ARC_REG_IC_IVIL, paddr);
-#endif
-                paddr += L1_CACHE_BYTES;
-        }
+        __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
         local_irq_restore(flags);
 }
 
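
With the I$ leaf reduced to an IRQ-protected call into __cache_line_loop(), a typical pairing of the two wrappers might look like the hypothetical sketch below (example_sync_icache and its paddr/vaddr/len arguments are illustrative; only __dc_line_op() and __ic_line_inv_vaddr() come from this file):

/* Hypothetical: after writing instructions to a page, push them out of the
 * D$ and drop the stale I$ lines via the two leaf wrappers unified here. */
static void example_sync_icache(unsigned long paddr, unsigned long vaddr,
                                unsigned long len)
{
        __dc_line_op(paddr, vaddr, len, OP_FLUSH);      /* wback D$ to memory */
        __ic_line_inv_vaddr(paddr, vaddr, len);         /* invalidate I$ lines */
}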

0 commit comments
