author    Michal Simek <monstr@monstr.eu>    2010-02-15 10:50:42 +0100
committer Michal Simek <monstr@monstr.eu>    2010-02-24 13:18:29 +0100
commit    0d670b24729be268eba98b3920b8571f60798d8d (patch)
tree      10f35ab6ea332bb2f082c5ffaed60e6442d36508
parent    75ef7cdda2daa35be9e070ac8e5258759ac03d06 (diff)
microblaze: Fix cache loop function for cache range
I created wrong asm code, but no test showed that this part of the code was wrong. I am not convinced that creating asm-optimized macros for the caches was a good idea; there is no measurable gain over the previous code, which is why it makes sense to bring back the old code and do some benchmarking to see which functions are faster.

Signed-off-by: Michal Simek <monstr@monstr.eu>
-rw-r--r--    arch/microblaze/kernel/cpu/cache.c    27
1 file changed, 8 insertions(+), 19 deletions(-)
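The commit message proposes benchmarking the asm macro against the plain C loop it replaced. For reference, a minimal sketch of such a C-based range loop is shown here; the function name, the alignment handling, and the use of one "wdc" per cache line are illustrative assumptions and are not part of this patch.

/*
 * Minimal C sketch (not the committed code) of a per-line cache range
 * loop, one MicroBlaze "wdc" (write data cache) instruction per line.
 * Helper name and alignment mask are assumptions for illustration.
 */
static inline void cache_range_loop_c(unsigned long start, unsigned long end,
                                      unsigned long line_length)
{
        unsigned long addr;

        /* Align down to the first cache line covered by the range. */
        start &= ~(line_length - 1);

        /* Touch every cache line in [start, end) with one wdc each. */
        for (addr = start; addr < end; addr += line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                      : : "r" (addr) : "memory");
}

A loop like this is easy to compare against the CACHE_RANGE_LOOP_1 macro below when timing which variant is actually faster.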
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int volatile temp; \
+ BUG_ON(end - start <= 0); \
\
- __asm__ __volatile__ (" 1: addk %0, %0, %1; \
- " #op " %0, r0; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
- "r" (step) : "memory"); \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ cmpu %0, %1, %2; \
+ bgtid %0, 1b; \
+ addk %1, %1, %3; \
+ " : : "r" (temp), "r" (start), "r" (end),\
+ "r" (line_length) : "memory"); \
} while (0);
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
pr_debug("%s\n", __func__);
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
-
-#if 0
- unsigned int i;
-
- pr_debug("%s\n", __func__);
-
- /* Just loop through cache size and invalidate it */
- for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
- __invalidate_dcache(0, i);
-#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,