File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
new file mode 100644
index 0000000..fceed4e
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -0,0 +1,12 @@
+#
+# Build the appropriate CPU version support
+#
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cache.o = -pg
+endif
+
+ccflags-y := -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \
+		-DCPU_REV=$(CPU_REV)
+
+obj-y += cache.o cpuinfo.o cpuinfo-pvr-full.o cpuinfo-static.o mb.o pvr.o
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 0000000..0bde47e
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,655 @@
+/*
+ * Cache control for MicroBlaze cache memories
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/cache.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static inline void __enable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	 msrset	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	 msrclr	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	 msrset	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __disable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	 msrclr	r0, %0;"	\
+				"nop; "			\
+			: : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	 mfs	r12, rmsr;"	\
+				"nop;"			\
+				"ori	r12, r12, %0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __disable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	 mfs	r12, rmsr;"	\
+				"nop;"			\
+				"andi	r12, r12, ~%0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __enable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	 mfs	r12, rmsr;"	\
+				"nop;"			\
+				"ori	r12, r12, %0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory", "r12");
+}
+
+static inline void __disable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	 mfs	r12, rmsr;"	\
+				"nop;"			\
+				"andi	r12, r12, ~%0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory", "r12");
+}
+
+
+/* Helper macro for computing the limits of cache range loops
+ *
+ * The end address can be unaligned, which is fine for the C implementation;
+ * the ASM implementation aligns it inside the ASM macros.
+ */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
+do {									\
+	int align = ~(cache_line_length - 1);				\
+	end = min(start + cache_size, end);				\
+	start &= align;							\
+} while (0)
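+
+/*
+ * Illustrative example of the macro above (values chosen only for
+ * illustration): with cache_line_length = 16 and cache_size = 0x2000,
+ * a call with start = 0x1005 and end = 0x9000 first clamps end to
+ * min(0x1005 + 0x2000, 0x9000) = 0x3005 (note the clamp uses the
+ * still-unaligned start), and then aligns start down to 0x1000.
+ */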
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on each cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size - line_length;			\
+	int step = -line_length;					\
+	WARN_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:      " #op " %0, r0;"		\
+					"bgtid   %0, 1b;"		\
+					"addk    %0, %0, %1;"		\
+					: : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0)
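+
+/*
+ * Rough C equivalent of CACHE_ALL_LOOP (a sketch only; op(i) stands for
+ * the "op i, r0" instruction):
+ *
+ *	for (i = cache_size - line_length; i >= 0; i -= line_length)
+ *		op(i);
+ *
+ * i.e. the whole cache is walked backwards, one line at a time.
+ */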
+
+/* Used for wdc.flush/wdc.clear, which can take rB as an offset; plain wdc
+ * and wic cannot.
+ *
+ * The start address is cache-line aligned.
+ * The end address may not be: if it is aligned, subtract one cache line
+ * length so the next cacheline is not flushed/invalidated; if it is not,
+ * align it down since the whole line is flushed/invalidated anyway.
+ */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int align = ~(line_length - 1);					\
+	int count;							\
+	end = ((end & align) == end) ? end - line_length : end & align;	\
+	count = end - start;						\
+	WARN_ON(count < 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"		\
+					"bgtid	%1, 1b;"		\
+					"addk	%1, %1, %2;"		\
+					: : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0)
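+
+/*
+ * Rough C equivalent of CACHE_RANGE_LOOP_2 (a sketch only): once end has
+ * been pulled back onto a line boundary, the loop behaves like
+ *
+ *	for (count = end - start; count >= 0; count -= line_length)
+ *		op(start + count);
+ *
+ * walking the range backwards from end down to start inclusive.
+ */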
+
+/* Only the first operand carries the address (the second is r0) - used for
+ * wic and wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
+do {									\
+	unsigned int volatile temp = 0;						\
+	unsigned int align = ~(line_length - 1);					\
+	end = ((end & align) == end) ? end - line_length : end & align;	\
+	WARN_ON(end < start);					\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
+					"cmpu	%0, %1, %2;"		\
+					"bgtid	%0, 1b;"		\
+					"addk	%1, %1, %3;"		\
+				: : "r" (temp), "r" (start), "r" (end),	\
+					"r" (line_length) : "memory");	\
+} while (0)
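+
+/*
+ * Rough C equivalent of CACHE_RANGE_LOOP_1 (a sketch only): once end has
+ * been pulled back onto a line boundary, the loop behaves like
+ *
+ *	for (addr = start; addr <= end; addr += line_length)
+ *		op(addr);
+ */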
+
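+/*
+ * Use the assembly loop variants above; comment this out to use the plain
+ * C loops in the #else branches below instead.
+ */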
+#define ASM_LOOP
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+				unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_range_noirq(unsigned long start,
+				unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_noirq(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_all_msr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
+	__enable_dcache_msr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_noirq_wt(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
+}
+
+/*
+ * FIXME This blindly invalidates, as expected, but it can't be called on
+ * noMMU in microblaze_cache_init below.
+ *
+ * MS: a noMMU kernel won't boot if plain wdc is used; the likely reason
+ * is that data the kernel still needs gets discarded.
+ */
+static void __invalidate_dcache_all_wb(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+					wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+						unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+							unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+							unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
+
+	__enable_dcache_msr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+							unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
+
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+				wdc.flush);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
+					: : "r" (i));
+#endif
+}
+
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+static const struct scache wb_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* Differs only in the ie, id, de and dd functions */
+static const struct scache wb_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old WT cache model: disables IRQs and turns the cache off */
+static const struct scache wt_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_msr_irq,
+	.iflr = __flush_icache_range_msr_irq,
+	.iin = __flush_icache_all_msr_irq,
+	.iinr = __flush_icache_range_msr_irq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_msr_irq,
+	.dflr = __invalidate_dcache_range_msr_irq_wt,
+	.din = __invalidate_dcache_all_msr_irq,
+	.dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+static const struct scache wt_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_nomsr_irq,
+	.iflr = __flush_icache_range_nomsr_irq,
+	.iin = __flush_icache_all_nomsr_irq,
+	.iinr = __flush_icache_range_nomsr_irq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_nomsr_irq,
+	.dflr = __invalidate_dcache_range_nomsr_irq,
+	.din = __invalidate_dcache_all_nomsr_irq,
+	.dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+static const struct scache wt_msr_noirq = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+static const struct scache wt_nomsr_noirq = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version codes for 7.20.a and 7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A	0x0c
+#define CPUVER_7_20_D	0x0f
+
+void microblaze_cache_init(void)
+{
+	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+		if (cpuinfo.dcache_wb) {
+			pr_info("wb_msr\n");
+			mbc = (struct scache *)&wb_msr;
+			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				pr_info("WB won't work properly\n");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				pr_info("wt_msr_noirq\n");
+				mbc = (struct scache *)&wt_msr_noirq;
+			} else {
+				pr_info("wt_msr\n");
+				mbc = (struct scache *)&wt_msr;
+			}
+		}
+	} else {
+		if (cpuinfo.dcache_wb) {
+			pr_info("wb_nomsr\n");
+			mbc = (struct scache *)&wb_nomsr;
+			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				pr_info("WB won't work properly\n");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				pr_info("wt_nomsr_noirq\n");
+				mbc = (struct scache *)&wt_nomsr_noirq;
+			} else {
+				pr_info("wt_nomsr\n");
+				mbc = (struct scache *)&wt_nomsr;
+			}
+		}
+	}
+	/*
+	 * FIXME Invalidation is done in U-Boot.
+	 * WT cache: data is already written to main memory.
+	 * WB cache: discarding data on noMMU caused the kernel not to boot.
+	 */
+	/* invalidate_dcache(); */
+	enable_dcache();
+
+	invalidate_icache();
+	enable_icache();
+}
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
new file mode 100644
index 0000000..a32daec
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -0,0 +1,115 @@
+/*
+ * Support for MicroBlaze PVR (processor version register)
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/pvr.h>
+#include <asm/cpuinfo.h>
+
+/*
+ * Helper macro to map between fields in our struct cpuinfo, and
+ * the PVR macros in pvr.h.
+ */
+
+#define CI(c, p) { ci->c = PVR_##p(pvr); }
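+/* For example, CI(ver_code, VERSION) expands to { ci->ver_code = PVR_VERSION(pvr); } */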
+
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define err_printk(x) \
+	early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
+#else
+#define err_printk(x) \
+	pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n");
+#endif
+
+void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
+{
+	struct pvr_s pvr;
+	u32 temp; /* for saving temp value */
+	get_pvr(&pvr);
+
+	CI(ver_code, VERSION);
+	if (!ci->ver_code) {
+		pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
+		return;
+	}
+
+	temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
+		PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
+	if (ci->use_instr != temp)
+		err_printk("BARREL, MSR, PCMP or DIV");
+	ci->use_instr = temp;
+
+	temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr);
+	if (ci->use_mult != temp)
+		err_printk("HW_MUL");
+	ci->use_mult = temp;
+
+	temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr);
+	if (ci->use_fpu != temp)
+		err_printk("HW_FPU");
+	ci->use_fpu = temp;
+
+	ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
+			PVR_UNALIGNED_EXCEPTION(pvr) |
+			PVR_ILL_OPCODE_EXCEPTION(pvr) |
+			PVR_IOPB_BUS_EXCEPTION(pvr) |
+			PVR_DOPB_BUS_EXCEPTION(pvr) |
+			PVR_DIV_ZERO_EXCEPTION(pvr) |
+			PVR_FPU_EXCEPTION(pvr) |
+			PVR_FSL_EXCEPTION(pvr);
+
+	CI(pvr_user1, USER1);
+	CI(pvr_user2, USER2);
+
+	CI(mmu, USE_MMU);
+	CI(mmu_privins, MMU_PRIVINS);
+	CI(endian, ENDIAN);
+
+	CI(use_icache, USE_ICACHE);
+	CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
+	CI(icache_write, ICACHE_ALLOW_WR);
+	ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
+	CI(icache_size, ICACHE_BYTE_SIZE);
+	CI(icache_base, ICACHE_BASEADDR);
+	CI(icache_high, ICACHE_HIGHADDR);
+
+	CI(use_dcache, USE_DCACHE);
+	CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
+	CI(dcache_write, DCACHE_ALLOW_WR);
+	ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
+	CI(dcache_size, DCACHE_BYTE_SIZE);
+	CI(dcache_base, DCACHE_BASEADDR);
+	CI(dcache_high, DCACHE_HIGHADDR);
+
+	temp = PVR_DCACHE_USE_WRITEBACK(pvr);
+	if (ci->dcache_wb != temp)
+		err_printk("DCACHE WB");
+	ci->dcache_wb = temp;
+
+	CI(use_dopb, D_OPB);
+	CI(use_iopb, I_OPB);
+	CI(use_dlmb, D_LMB);
+	CI(use_ilmb, I_LMB);
+	CI(num_fsl, FSL_LINKS);
+
+	CI(irq_edge, INTERRUPT_IS_EDGE);
+	CI(irq_positive, EDGE_IS_POSITIVE);
+
+	CI(area_optimised, AREA_OPTIMISED);
+
+	CI(hw_debug, DEBUG_ENABLED);
+	CI(num_pc_brk, NUMBER_OF_PC_BRK);
+	CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
+	CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);
+
+	CI(fpga_family_code, TARGET_FAMILY);
+}
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
new file mode 100644
index 0000000..85dbda4
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
+static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
+
+#define err_printk(x) \
+	early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
+
+void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
+{
+	u32 i = 0;
+
+	ci->use_instr =
+		(fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
+		(fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
+		(fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
+		(fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
+	if (CONFIG_XILINX_MICROBLAZE0_USE_BARREL)
+		i |= PVR0_USE_BARREL_MASK;
+	if (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR)
+		i |= PVR2_USE_MSR_INSTR;
+	if (CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR)
+		i |= PVR2_USE_PCMP_INSTR;
+	if (CONFIG_XILINX_MICROBLAZE0_USE_DIV)
+		i |= PVR0_USE_DIV_MASK;
+	if (ci->use_instr != i)
+		err_printk("BARREL, MSR, PCMP or DIV");
+
+	ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
+	if (ci->use_mult != CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL)
+		err_printk("HW_MUL");
+	ci->use_mult =
+		(ci->use_mult > 1 ?
+				(PVR2_USE_MUL64_MASK | PVR0_USE_HW_MUL_MASK) :
+				(ci->use_mult == 1 ? PVR0_USE_HW_MUL_MASK : 0));
+
+	ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
+	if (ci->use_fpu != CONFIG_XILINX_MICROBLAZE0_USE_FPU)
+		err_printk("HW_FPU");
+	ci->use_fpu = (ci->use_fpu > 1 ?
+				(PVR2_USE_FPU2_MASK | PVR0_USE_FPU_MASK) :
+				(ci->use_fpu == 1 ? PVR0_USE_FPU_MASK : 0));
+
+	ci->use_exc =
+		(fcpu(cpu, "xlnx,unaligned-exceptions") ?
+				PVR2_UNALIGNED_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,ill-opcode-exception") ?
+				PVR2_ILL_OPCODE_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,iopb-bus-exception") ?
+				PVR2_IOPB_BUS_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,dopb-bus-exception") ?
+				PVR2_DOPB_BUS_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,div-zero-exception") ?
+				PVR2_DIV_ZERO_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) |
+		(fcpu(cpu, "xlnx,fsl-exception") ? PVR2_USE_EXTEND_FSL : 0);
+
+	ci->use_icache = fcpu(cpu, "xlnx,use-icache");
+	ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
+	ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
+	ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
+	if (!ci->icache_line_length) {
+		if (fcpu(cpu, "xlnx,icache-use-fsl"))
+			ci->icache_line_length = 4 << 2;
+		else
+			ci->icache_line_length = 1 << 2;
+	}
+	ci->icache_size = fcpu(cpu, "i-cache-size");
+	ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
+	ci->icache_high = fcpu(cpu, "i-cache-highaddr");
+
+	ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
+	ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
+	ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
+	ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
+	if (!ci->dcache_line_length) {
+		if (fcpu(cpu, "xlnx,dcache-use-fsl"))
+			ci->dcache_line_length = 4 << 2;
+		else
+			ci->dcache_line_length = 1 << 2;
+	}
+	ci->dcache_size = fcpu(cpu, "d-cache-size");
+	ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
+	ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
+	ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
+
+	ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
+	ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
+	ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");
+	ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");
+
+	ci->num_fsl = fcpu(cpu, "xlnx,fsl-links");
+	ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge");
+	ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive");
+	ci->area_optimised = 0;
+
+	ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled");
+	ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk");
+	ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk");
+	ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk");
+
+	ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1");
+	ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
+
+	ci->mmu = fcpu(cpu, "xlnx,use-mmu");
+	ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr");
+	ci->endian = fcpu(cpu, "xlnx,endianness");
+
+	ci->ver_code = 0;
+	ci->fpga_family_code = 0;
+
+	/* Do various fixups based on CPU version and FPGA family strings */
+
+	/* Resolve the CPU version code */
+	for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
+		if (strcmp(cpu_ver_lookup[i].s, cpu_ver_string) == 0)
+			ci->ver_code = cpu_ver_lookup[i].k;
+	}
+
+	/* Resolve the FPGA family code */
+	for (i = 0; family_string_lookup[i].s != NULL; i++) {
+		if (strcmp(family_string_lookup[i].s, family_string) == 0)
+			ci->fpga_family_code = family_string_lookup[i].k;
+	}
+
+	/* FIXME - mb3 and spartan2 do not exist in PVR */
+	/* This is mb3 on a non-Spartan2 family */
+	if (ci->ver_code == 0x20 && ci->fpga_family_code != 0xf0)
+		/* Hardware Multiplier in use */
+		ci->use_mult = 1;
+}
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
new file mode 100644
index 0000000..b70bb53
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+const struct cpu_ver_key cpu_ver_lookup[] = {
+	/* These key values are as per the MBV field in PVR0 */
+	{"5.00.a", 0x01},
+	{"5.00.b", 0x02},
+	{"5.00.c", 0x03},
+	{"6.00.a", 0x04},
+	{"6.00.b", 0x06},
+	{"7.00.a", 0x05},
+	{"7.00.b", 0x07},
+	{"7.10.a", 0x08},
+	{"7.10.b", 0x09},
+	{"7.10.c", 0x0a},
+	{"7.10.d", 0x0b},
+	{"7.20.a", 0x0c},
+	{"7.20.b", 0x0d},
+	{"7.20.c", 0x0e},
+	{"7.20.d", 0x0f},
+	{"7.30.a", 0x10},
+	{"7.30.b", 0x11},
+	{"8.00.a", 0x12},
+	{"8.00.b", 0x13},
+	{"8.10.a", 0x14},
+	{"8.20.a", 0x15},
+	{"8.20.b", 0x16},
+	{"8.30.a", 0x17},
+	{"8.40.a", 0x18},
+	{"8.40.b", 0x19},
+	{"8.50.a", 0x1a},
+	{"8.50.b", 0x1c},
+	{"8.50.c", 0x1e},
+	{"9.0", 0x1b},
+	{"9.1", 0x1d},
+	{"9.2", 0x1f},
+	{"9.3", 0x20},
+	{"9.4", 0x21},
+	{"9.5", 0x22},
+	{NULL, 0},
+};
+
+/*
+ * FIXME Not sure whether these key values are actually defined by Xilinx in the PVR
+ */
+const struct family_string_key family_string_lookup[] = {
+	{"virtex2", 0x4},
+	{"virtex2pro", 0x5},
+	{"spartan3", 0x6},
+	{"virtex4", 0x7},
+	{"virtex5", 0x8},
+	{"spartan3e", 0x9},
+	{"spartan3a", 0xa},
+	{"spartan3an", 0xb},
+	{"spartan3adsp", 0xc},
+	{"spartan6", 0xd},
+	{"virtex6", 0xe},
+	{"virtex7", 0xf},
+	/* FIXME There is no key code defined for spartan2 */
+	{"spartan2", 0xf0},
+	{"kintex7", 0x10},
+	{"artix7", 0x11},
+	{"zynq7000", 0x12},
+	{"UltraScale Virtex", 0x13},
+	{"UltraScale Kintex", 0x14},
+	{NULL, 0},
+};
+
+struct cpuinfo cpuinfo;
+static struct device_node *cpu;
+
+void __init setup_cpuinfo(void)
+{
+	cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu");
+	if (!cpu)
+		pr_err("You don't have cpu!!!\n");
+
+	pr_info("%s: initialising\n", __func__);
+
+	switch (cpu_has_pvr()) {
+	case 0:
+		pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
+			__func__);
+		set_cpuinfo_static(&cpuinfo, cpu);
+		break;
+/* FIXME I found weird behavior with MB 7.00.a/b and 7.10.a;
+ * please do not use full PVR with the MMU */
+	case 1:
+		pr_info("%s: Using full CPU PVR support\n",
+			__func__);
+		set_cpuinfo_static(&cpuinfo, cpu);
+		set_cpuinfo_pvr_full(&cpuinfo, cpu);
+		break;
+	default:
+		pr_warn("%s: Unsupported PVR setting\n", __func__);
+		set_cpuinfo_static(&cpuinfo, cpu);
+	}
+
+	if (cpuinfo.mmu_privins)
+		pr_warn("%s: Stream instructions enabled"
+			" - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
+}
+
+void __init setup_cpuinfo_clk(void)
+{
+	struct clk *clk;
+
+	clk = of_clk_get(cpu, 0);
+	if (IS_ERR(clk)) {
+		pr_err("ERROR: CPU CCF input clock not found\n");
+		/* take timebase-frequency from DTS */
+		cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency");
+	} else {
+		cpuinfo.cpu_clock_freq = clk_get_rate(clk);
+	}
+
+	if (!cpuinfo.cpu_clock_freq) {
+		pr_err("ERROR: CPU clock frequency not setup\n");
+		BUG();
+	}
+}
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
new file mode 100644
index 0000000..9581d194
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -0,0 +1,158 @@
+/*
+ * CPU-version specific code
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2006-2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+
+#include <linux/bug.h>
+#include <asm/cpuinfo.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <asm/page.h>
+#include <linux/param.h>
+#include <asm/pvr.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	char *fpga_family = "Unknown";
+	char *cpu_ver = "Unknown";
+	int i;
+
+	/* Look up the FPGA family string for the stored family code */
+	for (i = 0; family_string_lookup[i].s != NULL; i++) {
+		if (cpuinfo.fpga_family_code == family_string_lookup[i].k) {
+			fpga_family = (char *)family_string_lookup[i].s;
+			break;
+		}
+	}
+
+	/* Look up the HW version string for the stored version code */
+	for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
+		if (cpuinfo.ver_code == cpu_ver_lookup[i].k) {
+			cpu_ver = (char *)cpu_ver_lookup[i].s;
+			break;
+		}
+	}
+
+	seq_printf(m,
+		   "CPU-Family:	MicroBlaze\n"
+		   "FPGA-Arch:	%s\n"
+		   "CPU-Ver:	%s, %s endian\n"
+		   "CPU-MHz:	%d.%02d\n"
+		   "BogoMips:	%lu.%02lu\n",
+		   fpga_family,
+		   cpu_ver,
+		   cpuinfo.endian ? "little" : "big",
+		   cpuinfo.cpu_clock_freq / 1000000,
+		   cpuinfo.cpu_clock_freq % 1000000,
+		   loops_per_jiffy / (500000 / HZ),
+		   (loops_per_jiffy / (5000 / HZ)) % 100);
+
+	seq_printf(m,
+		   "HW:\n Shift:\t\t%s\n"
+		   " MSR:\t\t%s\n"
+		   " PCMP:\t\t%s\n"
+		   " DIV:\t\t%s\n",
+		   (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no",
+		   (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no",
+		   (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no",
+		   (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no");
+
+	seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu);
+
+	seq_printf(m,
+		   " MUL:\t\t%s\n"
+		   " FPU:\t\t%s\n",
+		   (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
+		   (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no",
+		   (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
+		   (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
+
+	seq_printf(m,
+		   " Exc:\t\t%s%s%s%s%s%s%s%s\n",
+		   (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "",
+		   (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "",
+		   (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "",
+		   (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "",
+		   (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "",
+		   (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "",
+		   (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
+		   (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
+
+	seq_printf(m,
+		   "Stream-insns:\t%sprivileged\n",
+		   cpuinfo.mmu_privins ? "un" : "");
+
+	if (cpuinfo.use_icache)
+		seq_printf(m,
+			   "Icache:\t\t%ukB\tline length:\t%dB\n",
+			   cpuinfo.icache_size >> 10,
+			   cpuinfo.icache_line_length);
+	else
+		seq_puts(m, "Icache:\t\tno\n");
+
+	if (cpuinfo.use_dcache) {
+		seq_printf(m,
+			   "Dcache:\t\t%ukB\tline length:\t%dB\n",
+			   cpuinfo.dcache_size >> 10,
+			   cpuinfo.dcache_line_length);
+		seq_puts(m, "Dcache-Policy:\t");
+		if (cpuinfo.dcache_wb)
+			seq_puts(m, "write-back\n");
+		else
+			seq_puts(m, "write-through\n");
+	} else {
+		seq_puts(m, "Dcache:\t\tno\n");
+	}
+
+	seq_printf(m,
+		   "HW-Debug:\t%s\n",
+		   cpuinfo.hw_debug ? "yes" : "no");
+
+	seq_printf(m,
+		   "PVR-USR1:\t%02x\n"
+		   "PVR-USR2:\t%08x\n",
+		   cpuinfo.pvr_user1,
+		   cpuinfo.pvr_user2);
+
+	seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	int i = *pos;
+
+	return i < NR_CPUS ? (void *) (i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start = c_start,
+	.next = c_next,
+	.stop = c_stop,
+	.show = show_cpuinfo,
+};
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
new file mode 100644
index 0000000..8d0dc6d
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -0,0 +1,80 @@
+/*
+ * Support for MicroBlaze PVR (processor version register)
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/exceptions.h>
+#include <asm/pvr.h>
+
+/*
+ * Until we get an assembler that knows about the pvr registers,
+ * this horrible cruft will have to do.
+ * The instruction emitted for each register is mfs r3, rpvrNN
+ */
+
+#define get_single_pvr(pvrid, val)				\
+{								\
+	register unsigned tmp __asm__("r3");			\
+	tmp = 0x0;	/* Prevent warning about unused */	\
+	__asm__ __volatile__ (					\
+			"mfs	%0, rpvr" #pvrid ";"		\
+			: "=r" (tmp) : : "memory");		\
+	val = tmp;						\
+}
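+
+/*
+ * For example, get_single_pvr(0, val) asks the compiler to place tmp in r3,
+ * emits "mfs r3, rpvr0" and then copies the result into val.
+ */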
+
+/*
+ * Does the CPU support the PVR register?
+ * return value:
+ * 0: no PVR
+ * 1: full PVR
+ * 2: partial PVR (the caller then falls back to static cpuinfo)
+ *
+ * This must work on all CPU versions, including those before the
+ * PVR was even an option.
+ */
+
+int cpu_has_pvr(void)
+{
+	unsigned long flags;
+	unsigned pvr0;
+
+	local_save_flags(flags);
+
+	/* PVR bit in MSR tells us if there is any support */
+	if (!(flags & PVR_MSR_BIT))
+		return 0;
+
+	get_single_pvr(0, pvr0);
+	pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
+
+	if (pvr0 & PVR0_PVR_FULL_MASK)
+		return 1;
+
+	/* for partial PVR use static cpuinfo */
+	return 2;
+}
+
+void get_pvr(struct pvr_s *p)
+{
+	get_single_pvr(0, p->pvr[0]);
+	get_single_pvr(1, p->pvr[1]);
+	get_single_pvr(2, p->pvr[2]);
+	get_single_pvr(3, p->pvr[3]);
+	get_single_pvr(4, p->pvr[4]);
+	get_single_pvr(5, p->pvr[5]);
+	get_single_pvr(6, p->pvr[6]);
+	get_single_pvr(7, p->pvr[7]);
+	get_single_pvr(8, p->pvr[8]);
+	get_single_pvr(9, p->pvr[9]);
+	get_single_pvr(10, p->pvr[10]);
+	get_single_pvr(11, p->pvr[11]);
+}