File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.
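
For reference, the copy can be reproduced roughly as follows (a sketch;
"new-tree" below is an illustrative destination, not the actual path):

    git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git linux-stable
    git -C linux-stable checkout v4.4.100   # 26d6298789e695c9f627ce49a7bbd2286405798a
    cp -a linux-stable/. new-tree/          # plain 'cp' of the checked-out tree
    rm -rf new-tree/.git                    # prior history stays in the stable tree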

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 0000000..51617e4
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+    A Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
+	LL/TLBP/SC.  Since the TLBP instruction causes a trap, the reservation gets cleared
+	when we ERET back to the guest. This causes the guest to hang in an infinite loop.
+	This will be fixed in a future release.
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 0000000..2ae1282
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	---help---
+	  Say Y here to get to see options for using your Linux host to run
+	  other operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+	tristate "Kernel-based Virtual Machine (KVM) support"
+	depends on HAVE_KVM
+	select PREEMPT_NOTIFIERS
+	select ANON_INODES
+	select KVM_MMIO
+	select SRCU
+	---help---
+	  Support for hosting Guest kernels.
+	  Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+	depends on KVM
+	---help---
+	  When running in Trap & Emulate mode, patch privileged
+	  instructions to reduce the number of traps.
+
+	  If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+	bool "Maintain counters for COP0 accesses"
+	depends on KVM
+	---help---
+	  Maintain statistics for Guest COP0 accesses.
+	  A histogram of COP0 accesses is printed when the VM is
+	  shut down.
+
+	  If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 0000000..637ebbe
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,15 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
+	    interrupt.o stats.o commpage.o \
+	    dyntrans.o trap_emul.o fpu.o
+
+obj-$(CONFIG_KVM)	+= kvm.o
+obj-y			+= callback.o tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
new file mode 100644
index 0000000..313c2e3
--- /dev/null
+++ b/arch/mips/kvm/callback.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644
index 0000000..2d6e976
--- /dev/null
+++ b/arch/mips/kvm/commpage.c
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+	/* Specific init values for fields */
+	vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644
index 0000000..08c5fa2
--- /dev/null
+++ b/arch/mips/kvm/commpage.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
new file mode 100644
index 0000000..521121b
--- /dev/null
+++ b/arch/mips/kvm/dyntrans.c
@@ -0,0 +1,148 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/cacheflush.h>
+
+#include "commpage.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu)
+{
+	int result = 0;
+	unsigned long kseg0_opc;
+	uint32_t synci_inst = 0x0;
+
+	/* Replace the CACHE instruction, with a NOP */
+	kseg0_opc =
+	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+		       (vcpu, (unsigned long) opc));
+	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+	return result;
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
+ */
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			    struct kvm_vcpu *vcpu)
+{
+	int result = 0;
+	unsigned long kseg0_opc;
+	uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	synci_inst |= (base << 21);
+	synci_inst |= offset;
+
+	kseg0_opc =
+	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+		       (vcpu, (unsigned long) opc));
+	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+	return result;
+}
+
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	int32_t rt, rd, sel;
+	uint32_t mfc0_inst;
+	unsigned long kseg0_opc, flags;
+
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+
+	if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+		mfc0_inst = CLEAR_TEMPLATE;
+		mfc0_inst |= ((rt & 0x1f) << 16);
+	} else {
+		mfc0_inst = LW_TEMPLATE;
+		mfc0_inst |= ((rt & 0x1f) << 16);
+		mfc0_inst |=
+		    offsetof(struct mips_coproc,
+			     reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+						      cop0);
+	}
+
+	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		kseg0_opc =
+		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+			       (vcpu, (unsigned long) opc));
+		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+		local_flush_icache_range((unsigned long)opc,
+					 (unsigned long)opc + 32);
+		local_irq_restore(flags);
+	} else {
+		kvm_err("%s: Invalid address: %p\n", __func__, opc);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	int32_t rt, rd, sel;
+	uint32_t mtc0_inst = SW_TEMPLATE;
+	unsigned long kseg0_opc, flags;
+
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+
+	mtc0_inst |= ((rt & 0x1f) << 16);
+	mtc0_inst |=
+	    offsetof(struct mips_coproc,
+		     reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		kseg0_opc =
+		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+			       (vcpu, (unsigned long) opc));
+		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+		local_flush_icache_range((unsigned long)opc,
+					 (unsigned long)opc + 32);
+		local_irq_restore(flags);
+	} else {
+		kvm_err("%s: Invalid address: %p\n", __func__, opc);
+		return -EFAULT;
+	}
+
+	return 0;
+}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
new file mode 100644
index 0000000..4c85ab8
--- /dev/null
+++ b/arch/mips/kvm/emulate.c
@@ -0,0 +1,2707 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "opcode.h"
+#include "interrupt.h"
+#include "commpage.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and perform branch simulation, if required.
+ * This function should only be called while in a branch delay slot.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+	unsigned long instpc)
+{
+	unsigned int dspcontrol;
+	union mips_instruction insn;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	long epc = instpc;
+	long nextpc = KVM_INVALID_INST;
+
+	if (epc & 3)
+		goto unaligned;
+
+	/* Read the instruction */
+	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+	if (insn.word == KVM_INVALID_INST)
+		return KVM_INVALID_INST;
+
+	switch (insn.i_format.opcode) {
+		/* jr and jalr are in r_format format. */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+			arch->gprs[insn.r_format.rd] = epc + 8;
+			/* Fall through */
+		case jr_op:
+			nextpc = arch->gprs[insn.r_format.rs];
+			break;
+		}
+		break;
+
+		/*
+		 * This group contains:
+		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+		 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgez_op:
+		case bgezl_op:
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bltzal_op:
+		case bltzall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgezal_op:
+		case bgezall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+		case bposge32_op:
+			if (!cpu_has_dsp)
+				goto sigill;
+
+			dspcontrol = rddsp(0x01);
+
+			if (dspcontrol >= 32)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+		}
+		break;
+
+		/* These are unconditional and in j_format. */
+	case jal_op:
+		arch->gprs[31] = instpc + 8;
+	case j_op:
+		epc += 4;
+		epc >>= 28;
+		epc <<= 28;
+		epc |= (insn.j_format.target << 2);
+		nextpc = epc;
+		break;
+
+		/* These are conditional and in i_format. */
+	case beq_op:
+	case beql_op:
+		if (arch->gprs[insn.i_format.rs] ==
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bne_op:
+	case bnel_op:
+		if (arch->gprs[insn.i_format.rs] !=
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case blez_op:		/* not really i_format */
+	case blezl_op:
+		/* rt field assumed to be zero */
+		if ((long)arch->gprs[insn.i_format.rs] <= 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bgtz_op:
+	case bgtzl_op:
+		/* rt field assumed to be zero */
+		if ((long)arch->gprs[insn.i_format.rs] > 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+		/* And now the FPA/cp1 branch instructions. */
+	case cop1_op:
+		kvm_err("%s: unsupported cop1_op\n", __func__);
+		break;
+	}
+
+	return nextpc;
+
+unaligned:
+	kvm_err("%s: unaligned epc\n", __func__);
+	return nextpc;
+
+sigill:
+	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
+	return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+	unsigned long branch_pc;
+	enum emulation_result er = EMULATE_DONE;
+
+	if (cause & CAUSEF_BD) {
+		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+		if (branch_pc == KVM_INVALID_INST) {
+			er = EMULATE_FAIL;
+		} else {
+			vcpu->arch.pc = branch_pc;
+			kvm_debug("BD update_pc(): New PC: %#lx\n",
+				  vcpu->arch.pc);
+		}
+	} else
+		vcpu->arch.pc += 4;
+
+	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+	return er;
+}
+
+/**
+ * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
+ * @vcpu:	Virtual CPU.
+ *
+ * Returns:	1 if the CP0_Count timer is disabled by either the guest
+ *		CP0_Cause.DC bit or the count_ctl.DC bit.
+ *		0 otherwise (in which case CP0_Count timer is running).
+ */
+static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+}
+
+/**
+ * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+{
+	s64 now_ns, periods;
+	u64 delta;
+
+	now_ns = ktime_to_ns(now);
+	delta = now_ns + vcpu->arch.count_dyn_bias;
+
+	if (delta >= vcpu->arch.count_period) {
+		/* If delta is out of safe range the bias needs adjusting */
+		periods = div64_s64(now_ns, vcpu->arch.count_period);
+		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
+		/* Recalculate delta with new bias */
+		delta = now_ns + vcpu->arch.count_dyn_bias;
+	}
+
+	/*
+	 * We've ensured that:
+	 *   delta < count_period
+	 *
+	 * Therefore the intermediate delta*count_hz will never overflow since
+	 * at the boundary condition:
+	 *   delta = count_period
+	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
+	 *   delta * count_hz = NSEC_PER_SEC * 2^32
+	 */
+	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
+}
+
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu:	Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns:	Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+		return vcpu->arch.count_resume;
+
+	return ktime_get();
+}
+
+/**
+ * kvm_mips_read_count_running() - Read the current count value as if running.
+ * @vcpu:	Virtual CPU.
+ * @now:	Kernel time to read CP0_Count at.
+ *
+ * Returns the current guest CP0_Count register at time @now, and handles the
+ * timer interrupt if it is pending and hasn't been handled yet.
+ *
+ * Returns:	The current value of the guest CP0_Count register.
+ */
+static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t expires, threshold;
+	uint32_t count, compare;
+	int running;
+
+	/* Calculate the biased and scaled guest CP0_Count */
+	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	compare = kvm_read_c0_guest_compare(cop0);
+
+	/*
+	 * Find whether CP0_Count has reached the closest timer interrupt. If
+	 * not, we shouldn't inject it.
+	 */
+	if ((int32_t)(count - compare) < 0)
+		return count;
+
+	/*
+	 * The CP0_Count we're going to return has already reached the closest
+	 * timer interrupt. Quickly check if it really is a new interrupt by
+	 * looking at whether the interval until the hrtimer expiry time is
+	 * less than 1/4 of the timer period.
+	 */
+	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+	if (ktime_before(expires, threshold)) {
+		/*
+		 * Cancel it while we handle it so there's no chance of
+		 * interference with the timeout handler.
+		 */
+		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+		/* Nothing should be waiting on the timeout */
+		kvm_mips_callbacks->queue_timer_int(vcpu);
+
+		/*
+		 * Restart the timer if it was running based on the expiry time
+		 * we read, so that we don't push it back 2 periods.
+		 */
+		if (running) {
+			expires = ktime_add_ns(expires,
+					       vcpu->arch.count_period);
+			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
+				      HRTIMER_MODE_ABS);
+		}
+	}
+
+	return count;
+}
+
+/**
+ * kvm_mips_read_count() - Read the current count value.
+ * @vcpu:	Virtual CPU.
+ *
+ * Read the current guest CP0_Count value, taking into account whether the timer
+ * is stopped.
+ *
+ * Returns:	The current guest CP0_Count value.
+ */
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	/* If count disabled just read static copy of count */
+	if (kvm_mips_count_disabled(vcpu))
+		return kvm_read_c0_guest_count(cop0);
+
+	return kvm_mips_read_count_running(vcpu, ktime_get());
+}
+
+/**
+ * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
+ * @vcpu:	Virtual CPU.
+ * @count:	Output pointer for CP0_Count value at point of freeze.
+ *
+ * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
+ * at the point it was frozen. It is guaranteed that any pending interrupts at
+ * the point it was frozen are handled, and none after that point.
+ *
+ * This is useful where the time/CP0_Count is needed in the calculation of the
+ * new parameters.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ *
+ * Returns:	The ktime at the point of freeze.
+ */
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
+				       uint32_t *count)
+{
+	ktime_t now;
+
+	/* stop hrtimer before finding time */
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+	now = ktime_get();
+
+	/* find count at this point and handle pending hrtimer */
+	*count = kvm_mips_read_count_running(vcpu, now);
+
+	return now;
+}
+
+/**
+ * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu:	Virtual CPU.
+ * @now:	ktime at point of resume.
+ * @count:	CP0_Count at point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @count.
+ * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
+ * parameters need to be changed.
+ *
+ * It is guaranteed that a timer interrupt immediately after resume will be
+ * handled, but not if CP0_Compare is exactly at @count. That case is already
+ * handled by kvm_mips_freeze_hrtimer().
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+				    ktime_t now, uint32_t count)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t compare;
+	u64 delta;
+	ktime_t expire;
+
+	/* Calculate timeout (wrap 0 to 2^32) */
+	compare = kvm_read_c0_guest_compare(cop0);
+	delta = (u64)(uint32_t)(compare - count - 1) + 1;
+	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+	expire = ktime_add_ns(now, delta);
+
+	/* Update hrtimer to use new timeout */
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
+}
+
+/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu:	Virtual CPU.
+ * @count:	Guest CP0_Count value to set.
+ *
+ * Sets the CP0_Count value and updates the timer accordingly.
+ */
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t now;
+
+	/* Calculate bias */
+	now = kvm_mips_count_time(vcpu);
+	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+	if (kvm_mips_count_disabled(vcpu))
+		/* The timer's disabled, adjust the static count */
+		kvm_write_c0_guest_count(cop0, count);
+	else
+		/* Update timeout */
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_init_count() - Initialise timer.
+ * @vcpu:	Virtual CPU.
+ *
+ * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
+ * it going if it's enabled.
+ */
+void kvm_mips_init_count(struct kvm_vcpu *vcpu)
+{
+	/* 100 MHz */
+	vcpu->arch.count_hz = 100*1000*1000;
+	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
+					  vcpu->arch.count_hz);
+	vcpu->arch.count_dyn_bias = 0;
+
+	/* Starting at 0 */
+	kvm_mips_write_count(vcpu, 0);
+}
+
+/**
+ * kvm_mips_set_count_hz() - Update the frequency of the timer.
+ * @vcpu:	Virtual CPU.
+ * @count_hz:	Frequency of CP0_Count timer in Hz.
+ *
+ * Change the frequency of the CP0_Count timer. This is done atomically so that
+ * CP0_Count is continuous and no timer interrupt is lost.
+ *
+ * Returns:	-EINVAL if @count_hz is out of range.
+ *		0 on success.
+ */
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	ktime_t now;
+	u32 count;
+
+	/* ensure the frequency is in a sensible range... */
+	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
+		return -EINVAL;
+	/* ... and has actually changed */
+	if (vcpu->arch.count_hz == count_hz)
+		return 0;
+
+	/* Safely freeze timer so we can keep it continuous */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (dc) {
+		now = kvm_mips_count_time(vcpu);
+		count = kvm_read_c0_guest_count(cop0);
+	} else {
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+	}
+
+	/* Update the frequency */
+	vcpu->arch.count_hz = count_hz;
+	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+	vcpu->arch.count_dyn_bias = 0;
+
+	/* Calculate adjusted bias so dynamic count is unchanged */
+	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+	/* Update and resume hrtimer */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+	return 0;
+}
+
+/**
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu:	Virtual CPU.
+ * @compare:	New CP0_Compare value.
+ * @ack:	Whether to acknowledge timer interrupt.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
+ */
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	u32 old_compare = kvm_read_c0_guest_compare(cop0);
+	ktime_t now;
+	uint32_t count;
+
+	/* if unchanged, must just be an ack */
+	if (old_compare == compare) {
+		if (!ack)
+			return;
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+		kvm_write_c0_guest_compare(cop0, compare);
+		return;
+	}
+
+	/* freeze_hrtimer() takes care of timer interrupts <= count */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (!dc)
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+	if (ack)
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+
+	kvm_write_c0_guest_compare(cop0, compare);
+
+	/* resume_hrtimer() takes care of timer interrupts > count */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_count_disable() - Disable count.
+ * @vcpu:	Virtual CPU.
+ *
+ * Disable the CP0_Count timer. A timer interrupt on or before the final stop
+ * time will be handled but not after.
+ *
+ * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
+ * count_ctl.DC has been set (count disabled).
+ *
+ * Returns:	The time that the timer was stopped.
+ */
+static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t count;
+	ktime_t now;
+
+	/* Stop hrtimer */
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+	/* Set the static count from the dynamic count, handling pending TI */
+	now = ktime_get();
+	count = kvm_mips_read_count_running(vcpu, now);
+	kvm_write_c0_guest_count(cop0, count);
+
+	return now;
+}
+
+/**
+ * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
+ * @vcpu:	Virtual CPU.
+ *
+ * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
+ * before the final stop time will be handled if the timer isn't disabled by
+ * count_ctl.DC, but not after.
+ *
+ * Assumes CP0_Cause.DC is clear (count enabled).
+ */
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+		kvm_mips_count_disable(vcpu);
+}
+
+/**
+ * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
+ * @vcpu:	Virtual CPU.
+ *
+ * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
+ * the start time will be handled if the timer isn't disabled by count_ctl.DC,
+ * potentially before even returning, so the caller should be careful with
+ * ordering of CP0_Cause modifications so as not to lose it.
+ *
+ * Assumes CP0_Cause.DC is set (count disabled).
+ */
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t count;
+
+	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+
+	/*
+	 * Set the dynamic count to match the static count.
+	 * This starts the hrtimer if count_ctl.DC allows it.
+	 * Otherwise it conveniently updates the biases.
+	 */
+	count = kvm_read_c0_guest_count(cop0);
+	kvm_mips_write_count(vcpu, count);
+}
+
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu:	Virtual CPU.
+ * @count_ctl:	Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns:	-EINVAL if reserved bits are set.
+ *		0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+	s64 delta;
+	ktime_t expire, now;
+	uint32_t count, compare;
+
+	/* Only allow defined bits to be changed */
+	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+		return -EINVAL;
+
+	/* Apply new value */
+	vcpu->arch.count_ctl = count_ctl;
+
+	/* Master CP0_Count disable */
+	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+		/* Is CP0_Cause.DC already disabling CP0_Count? */
+		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+				/* Just record the current time */
+				vcpu->arch.count_resume = ktime_get();
+		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+			/* disable timer and record current time */
+			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+		} else {
+			/*
+			 * Calculate timeout relative to static count at resume
+			 * time (wrap 0 to 2^32).
+			 */
+			count = kvm_read_c0_guest_count(cop0);
+			compare = kvm_read_c0_guest_compare(cop0);
+			delta = (u64)(uint32_t)(compare - count - 1) + 1;
+			delta = div_u64(delta * NSEC_PER_SEC,
+					vcpu->arch.count_hz);
+			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+			/* Handle pending interrupt */
+			now = ktime_get();
+			if (ktime_compare(now, expire) >= 0)
+				/* Nothing should be waiting on the timeout */
+				kvm_mips_callbacks->queue_timer_int(vcpu);
+
+			/* Resume hrtimer without changing bias */
+			count = kvm_mips_read_count_running(vcpu, now);
+			kvm_mips_resume_hrtimer(vcpu, now, count);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu:		Virtual CPU.
+ * @count_resume:	Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns:	-EINVAL if out of valid range (0..now).
+ *		0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+	/*
+	 * It doesn't make sense for the resume time to be in the future, as it
+	 * would be possible for the next interrupt to be more than a full
+	 * period in the future.
+	 */
+	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+		return -EINVAL;
+
+	vcpu->arch.count_resume = ns_to_ktime(count_resume);
+	return 0;
+}
+
+/**
+ * kvm_mips_count_timeout() - Push timer forward on timeout.
+ * @vcpu:	Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward a period.
+ *
+ * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
+ */
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
+{
+	/* Add the Count period to the current expiry time */
+	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
+			       vcpu->arch.count_period);
+	return HRTIMER_RESTART;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+			  kvm_read_c0_guest_epc(cop0));
+		kvm_clear_c0_guest_status(cop0, ST0_EXL);
+		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+	} else {
+		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+			vcpu->arch.pc);
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+		  vcpu->arch.pending_exceptions);
+
+	++vcpu->stat.wait_exits;
+	trace_kvm_exit(vcpu, WAIT_EXITS);
+	if (!vcpu->arch.pending_exceptions) {
+		vcpu->arch.wait = 1;
+		kvm_vcpu_block(vcpu);
+
+		/*
+		 * If we are runnable, then definitely go off to user space to
+		 * check if any I/O interrupts are pending.
+		 */
+		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+		}
+	}
+
+	return EMULATE_DONE;
+}
+
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t pc = vcpu->arch.pc;
+
+	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	return EMULATE_FAIL;
+}
+
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:	VCPU with changed mappings.
+ * @tlb:	TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+					  struct kvm_mips_tlb *tlb)
+{
+	int cpu, i;
+	bool user;
+
+	/* No need to flush for entries which are already invalid */
+	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+		return;
+	/* User address space doesn't need flushing for KSeg2/3 changes */
+	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+	preempt_disable();
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten; if one
+	 * matches, invalidate it.
+	 */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	/* Invalidate the whole ASID on other CPUs */
+	cpu = smp_processor_id();
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
+		if (user)
+			vcpu->arch.guest_user_asid[i] = 0;
+		vcpu->arch.guest_kernel_asid[i] = 0;
+	}
+
+	preempt_enable();
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int index = kvm_read_c0_guest_index(cop0);
+	struct kvm_mips_tlb *tlb = NULL;
+	uint32_t pc = vcpu->arch.pc;
+
+	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+		kvm_debug("%s: illegal index: %d\n", __func__, index);
+		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+			  pc, index, kvm_read_c0_guest_entryhi(cop0),
+			  kvm_read_c0_guest_entrylo0(cop0),
+			  kvm_read_c0_guest_entrylo1(cop0),
+			  kvm_read_c0_guest_pagemask(cop0));
+		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+	}
+
+	tlb = &vcpu->arch.guest_tlb[index];
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+
+	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0),
+		  kvm_read_c0_guest_pagemask(cop0));
+
+	return EMULATE_DONE;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb *tlb = NULL;
+	uint32_t pc = vcpu->arch.pc;
+	int index;
+
+	get_random_bytes(&index, sizeof(index));
+	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+
+	tlb = &vcpu->arch.guest_tlb[index];
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+
+	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0));
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	long entryhi = kvm_read_c0_guest_entryhi(cop0);
+	uint32_t pc = vcpu->arch.pc;
+	int index = -1;
+
+	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+	kvm_write_c0_guest_index(cop0, index);
+
+	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+		  index);
+
+	return EMULATE_DONE;
+}
+
+/**
+ * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
+ * @vcpu:	Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config1 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
+{
+	unsigned int mask = 0;
+
+	/* Permit FPU to be present if FPU is supported */
+	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+		mask |= MIPS_CONF1_FP;
+
+	return mask;
+}
+
+/**
+ * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
+ * @vcpu:	Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config3 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
+{
+	/* Config4 is optional */
+	unsigned int mask = MIPS_CONF_M;
+
+	/* Permit MSA to be present if MSA is supported */
+	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+		mask |= MIPS_CONF3_MSA;
+
+	return mask;
+}
+
+/**
+ * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
+ * @vcpu:	Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config4 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
+{
+	/* Config5 is optional */
+	return MIPS_CONF_M;
+}
+
+/**
+ * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
+ * @vcpu:	Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config5 CP0
+ * register, by the guest itself.
+ */
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
+{
+	unsigned int mask = 0;
+
+	/* Permit MSAEn changes if MSA supported and enabled */
+	if (kvm_mips_guest_has_msa(&vcpu->arch))
+		mask |= MIPS_CONF5_MSAEN;
+
+	/*
+	 * Permit guest FPU mode changes if FPU is enabled and the relevant
+	 * feature exists according to FIR register.
+	 */
+	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+		if (cpu_has_fre)
+			mask |= MIPS_CONF5_FRE;
+		/* We don't support UFR or UFE */
+	}
+
+	return mask;
+}
+
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+					   uint32_t cause, struct kvm_run *run,
+					   struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+	int32_t rt, rd, copz, sel, co_bit, op;
+	uint32_t pc = vcpu->arch.pc;
+	unsigned long curr_pc;
+	int cpu, i;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	copz = (inst >> 21) & 0x1f;
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+	co_bit = (inst >> 25) & 1;
+
+	if (co_bit) {
+		op = (inst) & 0xff;
+
+		switch (op) {
+		case tlbr_op:	/*  Read indexed TLB entry  */
+			er = kvm_mips_emul_tlbr(vcpu);
+			break;
+		case tlbwi_op:	/*  Write indexed  */
+			er = kvm_mips_emul_tlbwi(vcpu);
+			break;
+		case tlbwr_op:	/*  Write random  */
+			er = kvm_mips_emul_tlbwr(vcpu);
+			break;
+		case tlbp_op:	/* TLB Probe */
+			er = kvm_mips_emul_tlbp(vcpu);
+			break;
+		case rfe_op:
+			kvm_err("!!!COP0_RFE!!!\n");
+			break;
+		case eret_op:
+			er = kvm_mips_emul_eret(vcpu);
+			goto dont_update_pc;
+			break;
+		case wait_op:
+			er = kvm_mips_emul_wait(vcpu);
+			break;
+		}
+	} else {
+		switch (copz) {
+		case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[rd][sel]++;
+#endif
+			/* Get reg */
+			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+				vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+			} else {
+				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+			}
+
+			kvm_debug
+			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+			break;
+
+		case dmfc_op:
+			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+			break;
+
+		case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[rd][sel]++;
+#endif
+			if ((rd == MIPS_CP0_TLB_INDEX)
+			    && (vcpu->arch.gprs[rt] >=
+				KVM_MIPS_GUEST_TLB_SIZE)) {
+				kvm_err("Invalid TLB Index: %ld",
+					vcpu->arch.gprs[rt]);
+				er = EMULATE_FAIL;
+				break;
+			}
+#define C0_EBASE_CORE_MASK 0xff
+			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+				/* Preserve CORE number */
+				kvm_change_c0_guest_ebase(cop0,
+							  ~(C0_EBASE_CORE_MASK),
+							  vcpu->arch.gprs[rt]);
+				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
+					kvm_read_c0_guest_ebase(cop0));
+			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+				uint32_t nasid =
+					vcpu->arch.gprs[rt] & ASID_MASK;
+				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
+				    ((kvm_read_c0_guest_entryhi(cop0) &
+				      ASID_MASK) != nasid)) {
+					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+						kvm_read_c0_guest_entryhi(cop0)
+						& ASID_MASK,
+						vcpu->arch.gprs[rt]
+						& ASID_MASK);
+
+					preempt_disable();
+					/* Blow away the shadow host TLBs */
+					kvm_mips_flush_host_tlb(1);
+					cpu = smp_processor_id();
+					for_each_possible_cpu(i)
+						if (i != cpu) {
+							vcpu->arch.guest_user_asid[i] = 0;
+							vcpu->arch.guest_kernel_asid[i] = 0;
+						}
+					preempt_enable();
+				}
+				kvm_write_c0_guest_entryhi(cop0,
+							   vcpu->arch.gprs[rt]);
+			}
+			/* Are we writing to COUNT */
+			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+				goto done;
+			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+					  pc, kvm_read_c0_guest_compare(cop0),
+					  vcpu->arch.gprs[rt]);
+
+				/* If we are writing to COMPARE */
+				/* Clear pending timer interrupt, if any */
+				kvm_mips_write_compare(vcpu,
+						       vcpu->arch.gprs[rt],
+						       true);
+			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+				unsigned int old_val, val, change;
+
+				old_val = kvm_read_c0_guest_status(cop0);
+				val = vcpu->arch.gprs[rt];
+				change = val ^ old_val;
+
+				/* Make sure that the NMI bit is never set */
+				val &= ~ST0_NMI;
+
+				/*
+				 * Don't allow CU1 or FR to be set unless FPU
+				 * capability enabled and exists in guest
+				 * configuration.
+				 */
+				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+					val &= ~(ST0_CU1 | ST0_FR);
+
+				/*
+				 * Also don't allow FR to be set if host doesn't
+				 * support it.
+				 */
+				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+					val &= ~ST0_FR;
+
+
+				/* Handle changes in FPU mode */
+				preempt_disable();
+
+				/*
+				 * FPU and Vector register state is made
+				 * UNPREDICTABLE by a change of FR, so don't
+				 * even bother saving it.
+				 */
+				if (change & ST0_FR)
+					kvm_drop_fpu(vcpu);
+
+				/*
+				 * If MSA state is already live, it is undefined
+				 * how it interacts with FR=0 FPU state, and we
+				 * don't want to hit reserved instruction
+				 * exceptions trying to save the MSA state later
+				 * when CU=1 && FR=1, so play it safe and save
+				 * it first.
+				 */
+				if (change & ST0_CU1 && !(val & ST0_FR) &&
+				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+					kvm_lose_fpu(vcpu);
+
+				/*
+				 * Propagate CU1 (FPU enable) changes
+				 * immediately if the FPU context is already
+				 * loaded. When disabling we leave the context
+				 * loaded so it can be quickly enabled again in
+				 * the near future.
+				 */
+				if (change & ST0_CU1 &&
+				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+					change_c0_status(ST0_CU1, val);
+
+				preempt_enable();
+
+				kvm_write_c0_guest_status(cop0, val);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				/*
+				 * If FPU present, we need CU1/FR bits to take
+				 * effect fairly soon.
+				 */
+				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+					kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+				unsigned int old_val, val, change, wrmask;
+
+				old_val = kvm_read_c0_guest_config5(cop0);
+				val = vcpu->arch.gprs[rt];
+
+				/* Only a few bits are writable in Config5 */
+				wrmask = kvm_mips_config5_wrmask(vcpu);
+				change = (val ^ old_val) & wrmask;
+				val = old_val ^ change;
+
+
+				/* Handle changes in FPU/MSA modes */
+				preempt_disable();
+
+				/*
+				 * Propagate FRE changes immediately if the FPU
+				 * context is already loaded.
+				 */
+				if (change & MIPS_CONF5_FRE &&
+				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+					change_c0_config5(MIPS_CONF5_FRE, val);
+
+				/*
+				 * Propagate MSAEn changes immediately if the
+				 * MSA context is already loaded. When disabling
+				 * we leave the context loaded so it can be
+				 * quickly enabled again in the near future.
+				 */
+				if (change & MIPS_CONF5_MSAEN &&
+				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+					change_c0_config5(MIPS_CONF5_MSAEN,
+							  val);
+
+				preempt_enable();
+
+				kvm_write_c0_guest_config5(cop0, val);
+			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+				uint32_t old_cause, new_cause;
+
+				old_cause = kvm_read_c0_guest_cause(cop0);
+				new_cause = vcpu->arch.gprs[rt];
+				/* Update R/W bits */
+				kvm_change_c0_guest_cause(cop0, 0x08800300,
+							  new_cause);
+				/* DC bit enabling/disabling timer? */
+				if ((old_cause ^ new_cause) & CAUSEF_DC) {
+					if (new_cause & CAUSEF_DC)
+						kvm_mips_count_disable_cause(vcpu);
+					else
+						kvm_mips_count_enable_cause(vcpu);
+				}
+			} else {
+				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+			}
+
+			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+				  rd, sel, cop0->reg[rd][sel]);
+			break;
+
+		case dmtc_op:
+			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+				vcpu->arch.pc, rt, rd, sel);
+			er = EMULATE_FAIL;
+			break;
+
+		case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+			if (rt != 0) {
+				vcpu->arch.gprs[rt] =
+				    kvm_read_c0_guest_status(cop0);
+			}
+			/* EI */
+			if (inst & 0x20) {
+				kvm_debug("[%#lx] mfmcz_op: EI\n",
+					  vcpu->arch.pc);
+				kvm_set_c0_guest_status(cop0, ST0_IE);
+			} else {
+				kvm_debug("[%#lx] mfmcz_op: DI\n",
+					  vcpu->arch.pc);
+				kvm_clear_c0_guest_status(cop0, ST0_IE);
+			}
+
+			break;
+
+		case wrpgpr_op:
+			{
+				uint32_t css =
+				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+				uint32_t pss =
+				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+				/*
+				 * We don't support any shadow register sets, so
+				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
+				 */
+				if (css || pss) {
+					er = EMULATE_FAIL;
+					break;
+				}
+				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+					  vcpu->arch.gprs[rt]);
+				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+			}
+			break;
+		default:
+			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+				vcpu->arch.pc, copz);
+			er = EMULATE_FAIL;
+			break;
+		}
+	}
+
+done:
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
+		vcpu->arch.pc = curr_pc;
+
+dont_update_pc:
+	/*
+	 * This is for special instructions whose emulation
+	 * updates the PC, so do not overwrite the PC under
+	 * any circumstances
+	 */
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DO_MMIO;
+	int32_t op, base, rt, offset;
+	uint32_t bytes;
+	void *data = run->mmio.data;
+	unsigned long curr_pc;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	rt = (inst >> 16) & 0x1f;
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	op = (inst >> 26) & 0x3f;
+
+	switch (op) {
+	case sb_op:
+		bytes = 1;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(u8 *) data = vcpu->arch.gprs[rt];
+		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+			  *(uint8_t *) data);
+
+		break;
+
+	case sw_op:
+		bytes = 4;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(uint32_t *) data = vcpu->arch.gprs[rt];
+
+		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+			  vcpu->arch.gprs[rt], *(uint32_t *) data);
+		break;
+
+	case sh_op:
+		bytes = 2;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(uint16_t *) data = vcpu->arch.gprs[rt];
+
+		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+			  vcpu->arch.gprs[rt], *(uint32_t *) data);
+		break;
+
+	default:
+		kvm_err("Store not yet supported");
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	/* Rollback PC if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
+		vcpu->arch.pc = curr_pc;
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
+	int32_t op, base, rt, offset;
+	uint32_t bytes;
+
+	rt = (inst >> 16) & 0x1f;
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	op = (inst >> 26) & 0x3f;
+
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
+	vcpu->arch.io_gpr = rt;
+
+	switch (op) {
+	case lw_op:
+		bytes = 4;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 0;
+		break;
+
+	case lh_op:
+	case lhu_op:
+		bytes = 2;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 0;
+
+		if (op == lh_op)
+			vcpu->mmio_needed = 2;
+		else
+			vcpu->mmio_needed = 1;
+
+		break;
+
+	case lbu_op:
+	case lb_op:
+		bytes = 1;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__,
+			       run->mmio.len);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_is_write = 0;
+
+		if (op == lb_op)
+			vcpu->mmio_needed = 2;
+		else
+			vcpu->mmio_needed = 1;
+
+		break;
+
+	default:
+		kvm_err("Load not yet supported");
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+	unsigned long offset = (va & ~PAGE_MASK);
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pa;
+	gfn_t gfn;
+	pfn_t pfn;
+
+	gfn = va >> PAGE_SHIFT;
+
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		return -1;
+	}
+	pfn = kvm->arch.guest_pmap[gfn];
+	pa = (pfn << PAGE_SHIFT) | offset;
+
+	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+		  CKSEG0ADDR(pa));
+
+	local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
+	return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+	int32_t offset, cache, op_inst, op, base;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long va;
+	unsigned long curr_pc;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	base = (inst >> 21) & 0x1f;
+	op_inst = (inst >> 16) & 0x1f;
+	offset = (int16_t)inst;
+	cache = (inst >> 16) & 0x3;
+	op = (inst >> 18) & 0x7;
+
+	va = arch->gprs[base] + offset;
+
+	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+		  cache, op, base, arch->gprs[base], offset);
+
+	/*
+	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
+	 * invalidate the caches entirely by stepping through all the
+	 * ways/indexes
+	 */
+	if (op == MIPS_CACHE_OP_INDEX_INV) {
+		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+			  arch->gprs[base], offset);
+
+		if (cache == MIPS_CACHE_DCACHE)
+			r4k_blast_dcache();
+		else if (cache == MIPS_CACHE_ICACHE)
+			r4k_blast_icache();
+		else {
+			kvm_err("%s: unsupported CACHE INDEX operation\n",
+				__func__);
+			return EMULATE_FAIL;
+		}
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+		goto done;
+	}
+
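+	/*
+	 * For guest KSEG0 addresses, fault the page straight into the host
+	 * TLB; for mapped segments, consult the guest TLB first and deliver
+	 * a TLB miss/invalid exception to the guest if the entry is missing
+	 * or invalid.
+	 */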
+	preempt_disable();
+	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+				__func__, va, vcpu, read_c0_entryhi());
+			er = EMULATE_FAIL;
+			preempt_enable();
+			goto done;
+		}
+	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+		int index;
+
+		/* If an entry already exists then skip */
+		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
+			goto skip_fault;
+
+		/*
+		 * If the address is not in the guest TLB, then give the guest
+		 * a fault; the resulting handler will do the right thing.
+		 */
+		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+						  (kvm_read_c0_guest_entryhi
+						   (cop0) & ASID_MASK));
+
+		if (index < 0) {
+			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+			vcpu->arch.host_cp0_badvaddr = va;
+			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+							 vcpu);
+			preempt_enable();
+			goto dont_update_pc;
+		} else {
+			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+			/*
+			 * Check if the entry is valid, if not then setup a TLB
+			 * invalid exception to the guest
+			 */
+			if (!TLB_IS_VALID(*tlb, va)) {
+				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+								run, vcpu);
+				preempt_enable();
+				goto dont_update_pc;
+			}
+			/*
+			 * We fault an entry from the guest tlb to the
+			 * shadow host TLB
+			 */
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+								 NULL, NULL)) {
+				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+					__func__, va, index, vcpu,
+					read_c0_entryhi());
+				er = EMULATE_FAIL;
+				preempt_enable();
+				goto done;
+			}
+		}
+	} else {
+		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
+		er = EMULATE_FAIL;
+		preempt_enable();
+		goto dont_update_pc;
+
+	}
+
+skip_fault:
+	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+	if (cache == MIPS_CACHE_DCACHE
+	    && (op == MIPS_CACHE_OP_FILL_WB_INV
+		|| op == MIPS_CACHE_OP_HIT_INV)) {
+		flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		/*
+		 * Replace the CACHE instruction with a SYNCI; not exactly the
+		 * same, but it avoids a trap.
+		 */
+		kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+		flush_dcache_line(va);
+		flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		/* Replace the CACHE instruction, with a SYNCI */
+		kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+	} else {
+		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
+		er = EMULATE_FAIL;
+		preempt_enable();
+		goto dont_update_pc;
+	}
+
+	preempt_enable();
+
+dont_update_pc:
+	/* Rollback PC */
+	vcpu->arch.pc = curr_pc;
+done:
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t inst;
+
+	/* Fetch the instruction. */
+	if (cause & CAUSEF_BD)
+		opc += 1;
+
+	inst = kvm_get_inst(opc, vcpu);
+
+	switch (((union mips_instruction)inst).r_format.opcode) {
+	case cop0_op:
+		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+		break;
+	case sb_op:
+	case sh_op:
+	case sw_op:
+		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+		break;
+	case lb_op:
+	case lbu_op:
+	case lhu_op:
+	case lh_op:
+	case lw_op:
+		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+		break;
+
+	case cache_op:
+		++vcpu->stat.cache_exits;
+		trace_kvm_exit(vcpu, CACHE_EXITS);
+		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+		break;
+
+	default:
+		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+			inst);
+		kvm_arch_vcpu_dump_regs(vcpu);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_SYSCALL << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* set pc to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x0;
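+		/*
+		 * (Offset 0x0 is the TLB refill vector; with EXL already set
+		 * the general exception vector at 0x180 is used instead.)
+		 */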
+
+	} else {
+		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long entryhi =
+		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+			  arch->pc);
+
+		/* set pc to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x0;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return EMULATE_DONE;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+#ifdef DEBUG
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+	int index;
+
+	/* If address not in the guest TLB, then we are in trouble */
+	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+	if (index < 0) {
+		/* XXXKYMA Invalidate and retry */
+		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+		     __func__, entryhi);
+		kvm_mips_dump_guest_tlbs(vcpu);
+		kvm_mips_dump_host_tlbs();
+		return EMULATE_FAIL;
+	}
+#endif
+
+	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+			  arch->pc);
+
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+	}
+
+	arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+	return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_RES_INST << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver RI when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_BREAK << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver BP when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+						uint32_t *opc,
+						struct kvm_run *run,
+						struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_TRAP << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver TRAP when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_MSAFPE << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_FPE << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver FPE when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_MSADIS << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+/* ll/sc, rdhwr, sync emulation */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
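+/*
+ * Field masks for the LL/SC and SPEC3/RDHWR encodings above; only the
+ * RDHWR case is emulated by kvm_mips_handle_ri() below (hardware registers
+ * 0 CPUNum, 1 SYNCI_Step, 2 CC, 3 CCRes and 29 ULR/UserLocal).
+ */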
+
+enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+					 struct kvm_run *run,
+					 struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long curr_pc;
+	uint32_t inst;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	/* Fetch the instruction. */
+	if (cause & CAUSEF_BD)
+		opc += 1;
+
+	inst = kvm_get_inst(opc, vcpu);
+
+	if (inst == KVM_INVALID_INST) {
+		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
+		return EMULATE_FAIL;
+	}
+
+	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+		int rd = (inst & RD) >> 11;
+		int rt = (inst & RT) >> 16;
+		/* If usermode, check RDHWR rd is allowed by guest HWREna */
+		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
+			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
+				  rd, opc);
+			goto emulate_ri;
+		}
+		switch (rd) {
+		case 0:	/* CPU number */
+			arch->gprs[rt] = 0;
+			break;
+		case 1:	/* SYNCI length */
+			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+					     current_cpu_data.icache.linesz);
+			break;
+		case 2:	/* Read count register */
+			arch->gprs[rt] = kvm_mips_read_count(vcpu);
+			break;
+		case 3:	/* Count register resolution */
+			switch (current_cpu_data.cputype) {
+			case CPU_20KC:
+			case CPU_25KF:
+				arch->gprs[rt] = 1;
+				break;
+			default:
+				arch->gprs[rt] = 2;
+			}
+			break;
+		case 29:
+			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+			break;
+
+		default:
+			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
+			goto emulate_ri;
+		}
+	} else {
+		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+		goto emulate_ri;
+	}
+
+	return EMULATE_DONE;
+
+emulate_ri:
+	/*
+	 * Rollback PC (if in branch delay slot then the PC already points to
+	 * branch target), and pass the RI exception to the guest OS.
+	 */
+	vcpu->arch.pc = curr_pc;
+	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+}
+
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+						  struct kvm_run *run)
+{
+	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+	enum emulation_result er = EMULATE_DONE;
+
+	if (run->mmio.len > sizeof(*gpr)) {
+		kvm_err("Bad MMIO length: %d", run->mmio.len);
+		er = EMULATE_FAIL;
+		goto done;
+	}
+
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
+
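+	/*
+	 * For sub-word loads, mmio_needed == 2 marks a signed access (lb/lh):
+	 * sign-extend the MMIO data; otherwise (lbu/lhu) zero-extend it.
+	 */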
+	switch (run->mmio.len) {
+	case 4:
+		*gpr = *(int32_t *) run->mmio.data;
+		break;
+
+	case 2:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int16_t *) run->mmio.data;
+		else
+			*gpr = *(uint16_t *)run->mmio.data;
+
+		break;
+	case 1:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int8_t *) run->mmio.data;
+		else
+			*gpr = *(u8 *) run->mmio.data;
+		break;
+	}
+
+done:
+	return er;
+}
+
+static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (exccode << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+			  exccode, kvm_read_c0_guest_epc(cop0),
+			  kvm_read_c0_guest_badvaddr(cop0));
+	} else {
+		kvm_err("Trying to deliver EXC when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
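+	/*
+	 * In guest user mode only a whitelist of exception codes is passed
+	 * through unchanged; TLB faults on guest kernel addresses are
+	 * rewritten as address errors, and anything unexpected becomes
+	 * EMULATE_PRIV_FAIL, which is delivered to the guest via
+	 * kvm_mips_emulate_exc() below.
+	 */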
+	if (usermode) {
+		switch (exccode) {
+		case T_INT:
+		case T_SYSCALL:
+		case T_BREAK:
+		case T_RES_INST:
+		case T_TRAP:
+		case T_MSAFPE:
+		case T_FPE:
+		case T_MSADIS:
+			break;
+
+		case T_COP_UNUSABLE:
+			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+				er = EMULATE_PRIV_FAIL;
+			break;
+
+		case T_TLB_MOD:
+			break;
+
+		case T_TLB_LD_MISS:
+			/*
+			 * If we are accessing Guest kernel space, then send an
+			 * address error exception to the guest
+			 */
+			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
+					  badvaddr);
+				cause &= ~0xff;
+				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+				er = EMULATE_PRIV_FAIL;
+			}
+			break;
+
+		case T_TLB_ST_MISS:
+			/*
+			 * If we are accessing Guest kernel space, then send an
+			 * address error exception to the guest
+			 */
+			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
+					  badvaddr);
+				cause &= ~0xff;
+				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+				er = EMULATE_PRIV_FAIL;
+			}
+			break;
+
+		case T_ADDR_ERR_ST:
+			kvm_debug("%s: address error ST @ %#lx\n", __func__,
+				  badvaddr);
+			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+				cause &= ~0xff;
+				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+			}
+			er = EMULATE_PRIV_FAIL;
+			break;
+		case T_ADDR_ERR_LD:
+			kvm_debug("%s: address error LD @ %#lx\n", __func__,
+				  badvaddr);
+			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+				cause &= ~0xff;
+				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+			}
+			er = EMULATE_PRIV_FAIL;
+			break;
+		default:
+			er = EMULATE_PRIV_FAIL;
+			break;
+		}
+	}
+
+	if (er == EMULATE_PRIV_FAIL)
+		kvm_mips_emulate_exc(cause, opc, run, vcpu);
+
+	return er;
+}
+
+/*
+ * User Address (UA) fault: this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	unsigned long va = vcpu->arch.host_cp0_badvaddr;
+	int index;
+
+	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+	/*
+	 * KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
+	 * send the guest an exception. The guest exc handler should then inject
+	 * an entry into the guest TLB.
+	 */
+	index = kvm_mips_guest_tlb_lookup(vcpu,
+					  (va & VPN2_MASK) |
+					  (kvm_read_c0_guest_entryhi
+					   (vcpu->arch.cop0) & ASID_MASK));
+	if (index < 0) {
+		if (exccode == T_TLB_LD_MISS) {
+			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+		} else if (exccode == T_TLB_ST_MISS) {
+			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+		} else {
+			kvm_err("%s: invalid exc code: %d\n", __func__,
+				exccode);
+			er = EMULATE_FAIL;
+		}
+	} else {
+		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+		/*
+		 * Check if the entry is valid, if not then setup a TLB invalid
+		 * exception to the guest
+		 */
+		if (!TLB_IS_VALID(*tlb, va)) {
+			if (exccode == T_TLB_LD_MISS) {
+				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+								vcpu);
+			} else if (exccode == T_TLB_ST_MISS) {
+				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+								vcpu);
+			} else {
+				kvm_err("%s: invalid exc code: %d\n", __func__,
+					exccode);
+				er = EMULATE_FAIL;
+			}
+		} else {
+			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+			/*
+			 * OK we have a Guest TLB entry, now inject it into the
+			 * shadow host TLB
+			 */
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+								 NULL, NULL)) {
+				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+					__func__, va, index, vcpu,
+					read_c0_entryhi());
+				er = EMULATE_FAIL;
+			}
+		}
+	}
+
+	return er;
+}
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 0000000..531fbf5
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FPU context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+	.set	noreorder
+	.set	noat
+
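+/*
+ * With Status.FR clear only the even-numbered double FP registers are
+ * architecturally visible, so the odd-numbered saves/restores below are
+ * skipped in that case.
+ */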
+LEAF(__kvm_save_fpu)
+	.set	push
+	.set	mips64r2
+	SET_HARDFLOAT
+	mfc0	t0, CP0_STATUS
+	sll     t0, t0, 5			# is Status.FR set?
+	bgez    t0, 1f				# no: skip odd doubles
+	 nop
+	sdc1	$f1,  VCPU_FPR1(a0)
+	sdc1	$f3,  VCPU_FPR3(a0)
+	sdc1	$f5,  VCPU_FPR5(a0)
+	sdc1	$f7,  VCPU_FPR7(a0)
+	sdc1	$f9,  VCPU_FPR9(a0)
+	sdc1	$f11, VCPU_FPR11(a0)
+	sdc1	$f13, VCPU_FPR13(a0)
+	sdc1	$f15, VCPU_FPR15(a0)
+	sdc1	$f17, VCPU_FPR17(a0)
+	sdc1	$f19, VCPU_FPR19(a0)
+	sdc1	$f21, VCPU_FPR21(a0)
+	sdc1	$f23, VCPU_FPR23(a0)
+	sdc1	$f25, VCPU_FPR25(a0)
+	sdc1	$f27, VCPU_FPR27(a0)
+	sdc1	$f29, VCPU_FPR29(a0)
+	sdc1	$f31, VCPU_FPR31(a0)
+1:	sdc1	$f0,  VCPU_FPR0(a0)
+	sdc1	$f2,  VCPU_FPR2(a0)
+	sdc1	$f4,  VCPU_FPR4(a0)
+	sdc1	$f6,  VCPU_FPR6(a0)
+	sdc1	$f8,  VCPU_FPR8(a0)
+	sdc1	$f10, VCPU_FPR10(a0)
+	sdc1	$f12, VCPU_FPR12(a0)
+	sdc1	$f14, VCPU_FPR14(a0)
+	sdc1	$f16, VCPU_FPR16(a0)
+	sdc1	$f18, VCPU_FPR18(a0)
+	sdc1	$f20, VCPU_FPR20(a0)
+	sdc1	$f22, VCPU_FPR22(a0)
+	sdc1	$f24, VCPU_FPR24(a0)
+	sdc1	$f26, VCPU_FPR26(a0)
+	sdc1	$f28, VCPU_FPR28(a0)
+	jr	ra
+	 sdc1	$f30, VCPU_FPR30(a0)
+	.set	pop
+	END(__kvm_save_fpu)
+
+LEAF(__kvm_restore_fpu)
+	.set	push
+	.set	mips64r2
+	SET_HARDFLOAT
+	mfc0	t0, CP0_STATUS
+	sll     t0, t0, 5			# is Status.FR set?
+	bgez    t0, 1f				# no: skip odd doubles
+	 nop
+	ldc1	$f1,  VCPU_FPR1(a0)
+	ldc1	$f3,  VCPU_FPR3(a0)
+	ldc1	$f5,  VCPU_FPR5(a0)
+	ldc1	$f7,  VCPU_FPR7(a0)
+	ldc1	$f9,  VCPU_FPR9(a0)
+	ldc1	$f11, VCPU_FPR11(a0)
+	ldc1	$f13, VCPU_FPR13(a0)
+	ldc1	$f15, VCPU_FPR15(a0)
+	ldc1	$f17, VCPU_FPR17(a0)
+	ldc1	$f19, VCPU_FPR19(a0)
+	ldc1	$f21, VCPU_FPR21(a0)
+	ldc1	$f23, VCPU_FPR23(a0)
+	ldc1	$f25, VCPU_FPR25(a0)
+	ldc1	$f27, VCPU_FPR27(a0)
+	ldc1	$f29, VCPU_FPR29(a0)
+	ldc1	$f31, VCPU_FPR31(a0)
+1:	ldc1	$f0,  VCPU_FPR0(a0)
+	ldc1	$f2,  VCPU_FPR2(a0)
+	ldc1	$f4,  VCPU_FPR4(a0)
+	ldc1	$f6,  VCPU_FPR6(a0)
+	ldc1	$f8,  VCPU_FPR8(a0)
+	ldc1	$f10, VCPU_FPR10(a0)
+	ldc1	$f12, VCPU_FPR12(a0)
+	ldc1	$f14, VCPU_FPR14(a0)
+	ldc1	$f16, VCPU_FPR16(a0)
+	ldc1	$f18, VCPU_FPR18(a0)
+	ldc1	$f20, VCPU_FPR20(a0)
+	ldc1	$f22, VCPU_FPR22(a0)
+	ldc1	$f24, VCPU_FPR24(a0)
+	ldc1	$f26, VCPU_FPR26(a0)
+	ldc1	$f28, VCPU_FPR28(a0)
+	jr	ra
+	 ldc1	$f30, VCPU_FPR30(a0)
+	.set	pop
+	END(__kvm_restore_fpu)
+
+LEAF(__kvm_restore_fcsr)
+	.set	push
+	SET_HARDFLOAT
+	lw	t0, VCPU_FCR31(a0)
+	/*
+	 * The ctc1 must stay at this offset in __kvm_restore_fcsr.
+	 * See kvm_mips_csr_die_notify() which handles t0 containing a value
+	 * which triggers an FP Exception, which must be stepped over and
+	 * ignored since the set cause bits must remain there for the guest.
+	 */
+	ctc1	t0, fcr31
+	jr	ra
+	 nop
+	.set	pop
+	END(__kvm_restore_fcsr)
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
new file mode 100644
index 0000000..9b44459
--- /dev/null
+++ b/arch/mips/kvm/interrupt.c
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+	set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+	clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Set the Cause bits to reflect the pending timer interrupt;
+	 * the EXC code will be set when we actually deliver the
+	 * interrupt.
+	 */
+	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+	/* Queue up an INT exception for the core */
+	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+			      struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+
+	/*
+	 * Set the Cause bits to reflect the pending I/O interrupt;
+	 * the EXC code will be set when we actually deliver the
+	 * interrupt.
+	 */
+	switch (intr) {
+	case 2:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+		/* Queue up an INT exception for the core */
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+		break;
+
+	case 3:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+		break;
+
+	case 4:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+				struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+
+	switch (intr) {
+	case -2:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+		break;
+
+	case -3:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+		break;
+
+	case -4:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			    uint32_t cause)
+{
+	int allowed = 0;
+	uint32_t exccode;
+
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
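+	/*
+	 * An interrupt may only be delivered when the guest has interrupts
+	 * enabled (Status.IE set, Status.EXL/ERL clear) and the Status.IM
+	 * bit matching the requested priority is set.
+	 */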
+	switch (priority) {
+	case MIPS_EXC_INT_TIMER:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IO:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IPI_1:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IPI_2:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	/* Are we allowed to deliver the interrupt ??? */
+	if (allowed) {
+		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+			/* save old pc */
+			kvm_write_c0_guest_epc(cop0, arch->pc);
+			kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+			if (cause & CAUSEF_BD)
+				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+			else
+				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+		} else
+			kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+					  (exccode << CAUSEB_EXCCODE));
+
+		/* XXXSL Set PC to the interrupt exception entry point */
+		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+			arch->pc = KVM_GUEST_KSEG0 + 0x200;
+		else
+			arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+		clear_bit(priority, &vcpu->arch.pending_exceptions);
+	}
+
+	return allowed;
+}
+
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			  uint32_t cause)
+{
+	return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+	unsigned long *pending = &vcpu->arch.pending_exceptions;
+	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+	unsigned int priority;
+
+	if (!(*pending) && !(*pending_clr))
+		return;
+
+	priority = __ffs(*pending_clr);
+	while (priority <= MIPS_EXC_MAX) {
+		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+				break;
+		}
+
+		priority = find_next_bit(pending_clr,
+					 BITS_PER_BYTE * sizeof(*pending_clr),
+					 priority + 1);
+	}
+
+	priority = __ffs(*pending);
+	while (priority <= MIPS_EXC_MAX) {
+		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+				break;
+		}
+
+		priority = find_next_bit(pending,
+					 BITS_PER_BYTE * sizeof(*pending),
+					 priority + 1);
+	}
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
new file mode 100644
index 0000000..2143884
--- /dev/null
+++ b/arch/mips/kvm/interrupt.h
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/*
+ * MIPS exception priorities: exceptions (including interrupts) are queued
+ * up for the guest in the order specified by their priorities.
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+extern char __kvm_mips_vcpu_run_end[];
+extern char mips32_exception[], mips32_exceptionEnd[];
+extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+			      struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+				struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			    uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			  uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
new file mode 100644
index 0000000..7770643
--- /dev/null
+++ b/arch/mips/kvm/locore.S
@@ -0,0 +1,655 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
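+/*
+ * DDATA_LO (CP0 register 28, select 3) is used below as scratch space to
+ * hold the vcpu pointer while the guest runs.
+ */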
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+	.set	noreorder
+	.set	noat
+
+FEXPORT(__kvm_mips_vcpu_run)
+	/* k0/k1 not being used in host kernel context */
+	INT_ADDIU k1, sp, -PT_SIZE
+	LONG_S	$0, PT_R0(k1)
+	LONG_S	$1, PT_R1(k1)
+	LONG_S	$2, PT_R2(k1)
+	LONG_S	$3, PT_R3(k1)
+
+	LONG_S	$4, PT_R4(k1)
+	LONG_S	$5, PT_R5(k1)
+	LONG_S	$6, PT_R6(k1)
+	LONG_S	$7, PT_R7(k1)
+
+	LONG_S	$8,  PT_R8(k1)
+	LONG_S	$9,  PT_R9(k1)
+	LONG_S	$10, PT_R10(k1)
+	LONG_S	$11, PT_R11(k1)
+	LONG_S	$12, PT_R12(k1)
+	LONG_S	$13, PT_R13(k1)
+	LONG_S	$14, PT_R14(k1)
+	LONG_S	$15, PT_R15(k1)
+	LONG_S	$16, PT_R16(k1)
+	LONG_S	$17, PT_R17(k1)
+
+	LONG_S	$18, PT_R18(k1)
+	LONG_S	$19, PT_R19(k1)
+	LONG_S	$20, PT_R20(k1)
+	LONG_S	$21, PT_R21(k1)
+	LONG_S	$22, PT_R22(k1)
+	LONG_S	$23, PT_R23(k1)
+	LONG_S	$24, PT_R24(k1)
+	LONG_S	$25, PT_R25(k1)
+
+	/*
+	 * XXXKYMA k0/k1 not saved, not being used if we got here through
+	 * an ioctl()
+	 */
+
+	LONG_S	$28, PT_R28(k1)
+	LONG_S	$29, PT_R29(k1)
+	LONG_S	$30, PT_R30(k1)
+	LONG_S	$31, PT_R31(k1)
+
+	/* Save hi/lo */
+	mflo	v0
+	LONG_S	v0, PT_LO(k1)
+	mfhi	v1
+	LONG_S	v1, PT_HI(k1)
+
+	/* Save host status */
+	mfc0	v0, CP0_STATUS
+	LONG_S	v0, PT_STATUS(k1)
+
+	/* Save host ASID, shove it into the BVADDR location */
+	mfc0	v1, CP0_ENTRYHI
+	andi	v1, 0xff
+	LONG_S	v1, PT_HOST_ASID(k1)
+
+	/* Save DDATA_LO, will be used to store pointer to vcpu */
+	mfc0	v1, CP0_DDATA_LO
+	LONG_S	v1, PT_HOST_USERLOCAL(k1)
+
+	/* DDATA_LO has pointer to vcpu */
+	mtc0	a1, CP0_DDATA_LO
+
+	/* Offset into vcpu->arch */
+	INT_ADDIU k1, a1, VCPU_HOST_ARCH
+
+	/*
+	 * Save the host stack to VCPU, used for exception processing
+	 * when we exit from the Guest
+	 */
+	LONG_S	sp, VCPU_HOST_STACK(k1)
+
+	/* Save the kernel gp as well */
+	LONG_S	gp, VCPU_HOST_GP(k1)
+
+	/*
+	 * Setup status register for running the guest in UM, interrupts
+	 * are disabled
+	 */
+	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
+	mtc0	k0, CP0_STATUS
+	ehb
+
+	/* load up the new EBASE */
+	LONG_L	k0, VCPU_GUEST_EBASE(k1)
+	mtc0	k0, CP0_EBASE
+
+	/*
+	 * Now that the new EBASE has been loaded, unset BEV, set
+	 * interrupt mask as it was but make sure that timer interrupts
+	 * are enabled
+	 */
+	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
+	andi	v0, v0, ST0_IM
+	or	k0, k0, v0
+	mtc0	k0, CP0_STATUS
+	ehb
+
+	/* Set Guest EPC */
+	LONG_L	t0, VCPU_PC(k1)
+	mtc0	t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+	/* Set the ASID for the Guest Kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
+	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+	/* t1: contains the base of the ASID array, need to get the cpu id */
+	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
+	INT_SLL	t2, t2, 2                   /* x4 */
+	REG_ADDU t3, t1, t2
+	LONG_L	k0, (t3)
+	andi	k0, k0, 0xff
+	mtc0	k0, CP0_ENTRYHI
+	ehb
+
+	/* Disable RDHWR access */
+	mtc0	zero, CP0_HWRENA
+
+	/* Now load up the Guest Context from VCPU */
+	LONG_L	$1, VCPU_R1(k1)
+	LONG_L	$2, VCPU_R2(k1)
+	LONG_L	$3, VCPU_R3(k1)
+
+	LONG_L	$4, VCPU_R4(k1)
+	LONG_L	$5, VCPU_R5(k1)
+	LONG_L	$6, VCPU_R6(k1)
+	LONG_L	$7, VCPU_R7(k1)
+
+	LONG_L	$8, VCPU_R8(k1)
+	LONG_L	$9, VCPU_R9(k1)
+	LONG_L	$10, VCPU_R10(k1)
+	LONG_L	$11, VCPU_R11(k1)
+	LONG_L	$12, VCPU_R12(k1)
+	LONG_L	$13, VCPU_R13(k1)
+	LONG_L	$14, VCPU_R14(k1)
+	LONG_L	$15, VCPU_R15(k1)
+	LONG_L	$16, VCPU_R16(k1)
+	LONG_L	$17, VCPU_R17(k1)
+	LONG_L	$18, VCPU_R18(k1)
+	LONG_L	$19, VCPU_R19(k1)
+	LONG_L	$20, VCPU_R20(k1)
+	LONG_L	$21, VCPU_R21(k1)
+	LONG_L	$22, VCPU_R22(k1)
+	LONG_L	$23, VCPU_R23(k1)
+	LONG_L	$24, VCPU_R24(k1)
+	LONG_L	$25, VCPU_R25(k1)
+
+	/* k0/k1 loaded up later */
+
+	LONG_L	$28, VCPU_R28(k1)
+	LONG_L	$29, VCPU_R29(k1)
+	LONG_L	$30, VCPU_R30(k1)
+	LONG_L	$31, VCPU_R31(k1)
+
+	/* Restore hi/lo */
+	LONG_L	k0, VCPU_LO(k1)
+	mtlo	k0
+
+	LONG_L	k0, VCPU_HI(k1)
+	mthi	k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+	/* Restore the guest's k0/k1 registers */
+	LONG_L	k0, VCPU_R26(k1)
+	LONG_L	k1, VCPU_R27(k1)
+
+	/* Jump to guest */
+	eret
+EXPORT(__kvm_mips_vcpu_run_end)
+
+VECTOR(MIPSX(exception), unknown)
+/* Find out what mode we came from and jump to the proper handler. */
+	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
+	ehb				#02:
+
+	mfc0	k0, CP0_EBASE		#03: Get EBASE
+	INT_SRL	k0, k0, 10		#04: Get rid of CPUNum
+	INT_SLL	k0, k0, 10		#05
+	LONG_S	k1, 0x3000(k0)		#06: Save k1 @ offset 0x3000
+	INT_ADDIU k0, k0, 0x2000	#07: Exception handler is
+					#    installed @ offset 0x2000
+	j	k0			#08: jump to the function
+	 nop				#09: branch delay slot
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+	/* Get the VCPU pointer from DDATA_LO */
+	mfc0	k1, CP0_DDATA_LO
+	INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+	/* Start saving Guest context to VCPU */
+	LONG_S	$0, VCPU_R0(k1)
+	LONG_S	$1, VCPU_R1(k1)
+	LONG_S	$2, VCPU_R2(k1)
+	LONG_S	$3, VCPU_R3(k1)
+	LONG_S	$4, VCPU_R4(k1)
+	LONG_S	$5, VCPU_R5(k1)
+	LONG_S	$6, VCPU_R6(k1)
+	LONG_S	$7, VCPU_R7(k1)
+	LONG_S	$8, VCPU_R8(k1)
+	LONG_S	$9, VCPU_R9(k1)
+	LONG_S	$10, VCPU_R10(k1)
+	LONG_S	$11, VCPU_R11(k1)
+	LONG_S	$12, VCPU_R12(k1)
+	LONG_S	$13, VCPU_R13(k1)
+	LONG_S	$14, VCPU_R14(k1)
+	LONG_S	$15, VCPU_R15(k1)
+	LONG_S	$16, VCPU_R16(k1)
+	LONG_S	$17, VCPU_R17(k1)
+	LONG_S	$18, VCPU_R18(k1)
+	LONG_S	$19, VCPU_R19(k1)
+	LONG_S	$20, VCPU_R20(k1)
+	LONG_S	$21, VCPU_R21(k1)
+	LONG_S	$22, VCPU_R22(k1)
+	LONG_S	$23, VCPU_R23(k1)
+	LONG_S	$24, VCPU_R24(k1)
+	LONG_S	$25, VCPU_R25(k1)
+
+	/* Guest k0/k1 saved later */
+
+	LONG_S	$28, VCPU_R28(k1)
+	LONG_S	$29, VCPU_R29(k1)
+	LONG_S	$30, VCPU_R30(k1)
+	LONG_S	$31, VCPU_R31(k1)
+
+	/* We need to save hi/lo and restore them on the way out */
+	mfhi	t0
+	LONG_S	t0, VCPU_HI(k1)
+
+	mflo	t0
+	LONG_S	t0, VCPU_LO(k1)
+
+	/* Finally save guest k0/k1 to VCPU */
+	mfc0	t0, CP0_ERROREPC
+	LONG_S	t0, VCPU_R26(k1)
+
+	/* Get GUEST k1 and save it in VCPU */
+	PTR_LI	t1, ~0x2ff
+	mfc0	t0, CP0_EBASE
+	and	t0, t0, t1
+	LONG_L	t0, 0x3000(t0)
+	LONG_S	t0, VCPU_R27(k1)
+
+	/* Now that context has been saved, we can use other registers */
+
+	/* Restore vcpu */
+	mfc0	a1, CP0_DDATA_LO
+	move	s1, a1
+
+	/* Restore run (vcpu->run) */
+	LONG_L	a0, VCPU_RUN(a1)
+	/* Save pointer to run in s0, will be saved by the compiler */
+	move	s0, a0
+
+	/*
+	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception
+	 */
+	mfc0	k0,CP0_EPC
+	LONG_S	k0, VCPU_PC(k1)
+
+	mfc0	k0, CP0_BADVADDR
+	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+	mfc0	k0, CP0_CAUSE
+	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)
+
+	mfc0	k0, CP0_ENTRYHI
+	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)
+
+	/* Now restore the host state just enough to run the handlers */
+
+	/* Switch EBASE to the one used by Linux */
+	/* load up the host EBASE */
+	mfc0	v0, CP0_STATUS
+
+	.set	at
+	or	k0, v0, ST0_BEV
+	.set	noat
+
+	mtc0	k0, CP0_STATUS
+	ehb
+
+	LONG_L	k0, VCPU_HOST_EBASE(k1)
+	mtc0	k0,CP0_EBASE
+
+	/*
+	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
+	 * trigger FPE for pending exceptions.
+	 */
+	.set	at
+	and	v1, v0, ST0_CU1
+	beqz	v1, 1f
+	 nop
+	.set	push
+	SET_HARDFLOAT
+	cfc1	t0, fcr31
+	sw	t0, VCPU_FCR31(k1)
+	ctc1	zero,fcr31
+	.set	pop
+	.set	noat
+1:
+
+#ifdef CONFIG_CPU_HAS_MSA
+	/*
+	 * If MSA is enabled, save MSACSR and clear it so that later
+	 * instructions don't trigger MSAFPE for pending exceptions.
+	 */
+	mfc0	t0, CP0_CONFIG3
+	ext	t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
+	beqz	t0, 1f
+	 nop
+	mfc0	t0, CP0_CONFIG5
+	ext	t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
+	beqz	t0, 1f
+	 nop
+	_cfcmsa	t0, MSA_CSR
+	sw	t0, VCPU_MSA_CSR(k1)
+	_ctcmsa	MSA_CSR, zero
+1:
+#endif
+
+	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+	.set	at
+	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+	or	v0, v0, ST0_CU0
+	.set	noat
+	mtc0	v0, CP0_STATUS
+	ehb
+
+	/* Load up host GP */
+	LONG_L	gp, VCPU_HOST_GP(k1)
+
+	/* Need a stack before we can jump to "C" */
+	LONG_L	sp, VCPU_HOST_STACK(k1)
+
+	/* Saved host state */
+	INT_ADDIU sp, sp, -PT_SIZE
+
+	/*
+	 * XXXKYMA do we need to load the host ASID, maybe not because the
+	 * kernel entries are marked GLOBAL, need to verify
+	 */
+
+	/* Restore host DDATA_LO */
+	LONG_L	k0, PT_HOST_USERLOCAL(sp)
+	mtc0	k0, CP0_DDATA_LO
+
+	/* Restore RDHWR access */
+	PTR_LI	k0, 0x2000000F
+	mtc0	k0, CP0_HWRENA
+
+	/* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+	/*
+	 * XXXKYMA: not sure if this is safe, how large is the stack??
+	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
+	 * with this in the kernel
+	 */
+	PTR_LA	t9, kvm_mips_handle_exit
+	jalr.hb	t9
+	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
+
+	/* Return from handler; make sure interrupts are disabled */
+	di
+	ehb
+
+	/*
+	 * XXXKYMA: k0/k1 could have been blown away if we processed
+	 * an exception while we were handling the exception from the
+	 * guest, reload k1
+	 */
+
+	move	k1, s1
+	INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+	/*
+	 * Check the return value; it tells us whether we are returning to
+	 * the host (to handle I/O etc.) or resuming the guest.
+	 */
+	andi	t0, v0, RESUME_HOST
+	bnez	t0, __kvm_mips_return_to_host
+	 nop
+
+__kvm_mips_return_to_guest:
+	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+	mtc0	s1, CP0_DDATA_LO
+
+	/* Load up the Guest EBASE to minimize the window where BEV is set */
+	LONG_L	t0, VCPU_GUEST_EBASE(k1)
+
+	/* Switch EBASE back to the one used by KVM */
+	mfc0	v1, CP0_STATUS
+	.set	at
+	or	k0, v1, ST0_BEV
+	.set	noat
+	mtc0	k0, CP0_STATUS
+	ehb
+	mtc0	t0, CP0_EBASE
+
+	/* Setup status register for running guest in UM */
+	.set	at
+	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+	and	v1, v1, ~(ST0_CU0 | ST0_MX)
+	.set	noat
+	mtc0	v1, CP0_STATUS
+	ehb
+
+	/* Set Guest EPC */
+	LONG_L	t0, VCPU_PC(k1)
+	mtc0	t0, CP0_EPC
+
+	/* Set the ASID for the Guest Kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
+	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+	/* t1: contains the base of the ASID array, need to get the cpu id  */
+	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
+	INT_SLL	t2, t2, 2		/* x4 */
+	REG_ADDU t3, t1, t2
+	LONG_L	k0, (t3)
+	andi	k0, k0, 0xff
+	mtc0	k0,CP0_ENTRYHI
+	ehb
+
+	/* Disable RDHWR access */
+	mtc0    zero,  CP0_HWRENA
+
+	/* load the guest context from VCPU and return */
+	LONG_L	$0, VCPU_R0(k1)
+	LONG_L	$1, VCPU_R1(k1)
+	LONG_L	$2, VCPU_R2(k1)
+	LONG_L	$3, VCPU_R3(k1)
+	LONG_L	$4, VCPU_R4(k1)
+	LONG_L	$5, VCPU_R5(k1)
+	LONG_L	$6, VCPU_R6(k1)
+	LONG_L	$7, VCPU_R7(k1)
+	LONG_L	$8, VCPU_R8(k1)
+	LONG_L	$9, VCPU_R9(k1)
+	LONG_L	$10, VCPU_R10(k1)
+	LONG_L	$11, VCPU_R11(k1)
+	LONG_L	$12, VCPU_R12(k1)
+	LONG_L	$13, VCPU_R13(k1)
+	LONG_L	$14, VCPU_R14(k1)
+	LONG_L	$15, VCPU_R15(k1)
+	LONG_L	$16, VCPU_R16(k1)
+	LONG_L	$17, VCPU_R17(k1)
+	LONG_L	$18, VCPU_R18(k1)
+	LONG_L	$19, VCPU_R19(k1)
+	LONG_L	$20, VCPU_R20(k1)
+	LONG_L	$21, VCPU_R21(k1)
+	LONG_L	$22, VCPU_R22(k1)
+	LONG_L	$23, VCPU_R23(k1)
+	LONG_L	$24, VCPU_R24(k1)
+	LONG_L	$25, VCPU_R25(k1)
+
+	/* k0/k1 loaded later */
+	LONG_L	$28, VCPU_R28(k1)
+	LONG_L	$29, VCPU_R29(k1)
+	LONG_L	$30, VCPU_R30(k1)
+	LONG_L	$31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+	LONG_L	k0, VCPU_HI(k1)
+	mthi	k0
+
+	LONG_L	k0, VCPU_LO(k1)
+	mtlo	k0
+
+	LONG_L	k0, VCPU_R26(k1)
+	LONG_L	k1, VCPU_R27(k1)
+
+	eret
+
+__kvm_mips_return_to_host:
+	/* EBASE is already pointing to Linux */
+	LONG_L	k1, VCPU_HOST_STACK(k1)
+	INT_ADDIU k1,k1, -PT_SIZE
+
+	/* Restore host DDATA_LO */
+	LONG_L	k0, PT_HOST_USERLOCAL(k1)
+	mtc0	k0, CP0_DDATA_LO
+
+	/* Restore host ASID */
+	LONG_L	k0, PT_HOST_ASID(sp)
+	andi	k0, 0xff
+	mtc0	k0,CP0_ENTRYHI
+	ehb
+
+	/* Load context saved on the host stack */
+	LONG_L	$0, PT_R0(k1)
+	LONG_L	$1, PT_R1(k1)
+
+	/*
+	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code
+	 */
+	INT_SRA	k0, v0, 2
+	move	$2, k0
+
+	LONG_L	$3, PT_R3(k1)
+	LONG_L	$4, PT_R4(k1)
+	LONG_L	$5, PT_R5(k1)
+	LONG_L	$6, PT_R6(k1)
+	LONG_L	$7, PT_R7(k1)
+	LONG_L	$8, PT_R8(k1)
+	LONG_L	$9, PT_R9(k1)
+	LONG_L	$10, PT_R10(k1)
+	LONG_L	$11, PT_R11(k1)
+	LONG_L	$12, PT_R12(k1)
+	LONG_L	$13, PT_R13(k1)
+	LONG_L	$14, PT_R14(k1)
+	LONG_L	$15, PT_R15(k1)
+	LONG_L	$16, PT_R16(k1)
+	LONG_L	$17, PT_R17(k1)
+	LONG_L	$18, PT_R18(k1)
+	LONG_L	$19, PT_R19(k1)
+	LONG_L	$20, PT_R20(k1)
+	LONG_L	$21, PT_R21(k1)
+	LONG_L	$22, PT_R22(k1)
+	LONG_L	$23, PT_R23(k1)
+	LONG_L	$24, PT_R24(k1)
+	LONG_L	$25, PT_R25(k1)
+
+	/* Host k0/k1 were not saved */
+
+	LONG_L	$28, PT_R28(k1)
+	LONG_L	$29, PT_R29(k1)
+	LONG_L	$30, PT_R30(k1)
+
+	LONG_L	k0, PT_HI(k1)
+	mthi	k0
+
+	LONG_L	k0, PT_LO(k1)
+	mtlo	k0
+
+	/* Restore RDHWR access */
+	PTR_LI	k0, 0x2000000F
+	mtc0	k0,  CP0_HWRENA
+
+	/* Restore RA, which is the address we will return to */
+	LONG_L  ra, PT_R31(k1)
+	j       ra
+	 nop
+
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+	####
+	##### The exception handlers.
+	#####
+	.word _C_LABEL(MIPSX(GuestException))	#  0
+	.word _C_LABEL(MIPSX(GuestException))	#  1
+	.word _C_LABEL(MIPSX(GuestException))	#  2
+	.word _C_LABEL(MIPSX(GuestException))	#  3
+	.word _C_LABEL(MIPSX(GuestException))	#  4
+	.word _C_LABEL(MIPSX(GuestException))	#  5
+	.word _C_LABEL(MIPSX(GuestException))	#  6
+	.word _C_LABEL(MIPSX(GuestException))	#  7
+	.word _C_LABEL(MIPSX(GuestException))	#  8
+	.word _C_LABEL(MIPSX(GuestException))	#  9
+	.word _C_LABEL(MIPSX(GuestException))	# 10
+	.word _C_LABEL(MIPSX(GuestException))	# 11
+	.word _C_LABEL(MIPSX(GuestException))	# 12
+	.word _C_LABEL(MIPSX(GuestException))	# 13
+	.word _C_LABEL(MIPSX(GuestException))	# 14
+	.word _C_LABEL(MIPSX(GuestException))	# 15
+	.word _C_LABEL(MIPSX(GuestException))	# 16
+	.word _C_LABEL(MIPSX(GuestException))	# 17
+	.word _C_LABEL(MIPSX(GuestException))	# 18
+	.word _C_LABEL(MIPSX(GuestException))	# 19
+	.word _C_LABEL(MIPSX(GuestException))	# 20
+	.word _C_LABEL(MIPSX(GuestException))	# 21
+	.word _C_LABEL(MIPSX(GuestException))	# 22
+	.word _C_LABEL(MIPSX(GuestException))	# 23
+	.word _C_LABEL(MIPSX(GuestException))	# 24
+	.word _C_LABEL(MIPSX(GuestException))	# 25
+	.word _C_LABEL(MIPSX(GuestException))	# 26
+	.word _C_LABEL(MIPSX(GuestException))	# 27
+	.word _C_LABEL(MIPSX(GuestException))	# 28
+	.word _C_LABEL(MIPSX(GuestException))	# 29
+	.word _C_LABEL(MIPSX(GuestException))	# 30
+	.word _C_LABEL(MIPSX(GuestException))	# 31
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
new file mode 100644
index 0000000..a017b23
--- /dev/null
+++ b/arch/mips/kvm/mips.c
@@ -0,0 +1,1672 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100	/* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
+	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
+	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
+	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
+	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
+	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
+	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
+	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
+	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
+	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
+	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
+	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
+	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
+	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
+	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
+	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
+	{NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		vcpu->arch.guest_kernel_asid[i] = 0;
+		vcpu->arch.guest_user_asid[i] = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * XXXKYMA: We are simulating a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+	return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
+int kvm_arch_hardware_enable(void)
+{
+	return 0;
+}
+
+int kvm_arch_hardware_setup(void)
+{
+	return 0;
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+	*(int *)rtn = 0;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+	unsigned long wired;
+
+	/*
+	 * Add a wired entry to the TLB; it is used to map the commpage to
+	 * the Guest kernel
+	 */
+	wired = read_c0_wired();
+	write_c0_wired(wired + 1);
+	mtc0_tlbw_hazard();
+	kvm->arch.commpage_tlb = wired;
+
+	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+		  kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+	struct kvm *kvm = (struct kvm *)arg;
+
+	kvm_mips_init_tlbs(kvm);
+	kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+	if (atomic_inc_return(&kvm_mips_instance) == 1) {
+		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
+			  __func__);
+		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+	}
+
+	return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
+
+	/* Put the pages we reserved for the guest pmap */
+	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+	}
+	kfree(kvm->arch.guest_pmap);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_arch_vcpu_free(vcpu);
+	}
+
+	mutex_lock(&kvm->lock);
+
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+
+	mutex_unlock(&kvm->lock);
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+	/* Restore wired count */
+	write_c0_wired(0);
+	mtc0_tlbw_hazard();
+	/* Clear out all the TLBs */
+	kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+	kvm_mips_free_vcpus(kvm);
+
+	/* If this is the last instance, restore wired count */
+	if (atomic_dec_return(&kvm_mips_instance) == 0) {
+		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
+			  __func__);
+		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+	}
+}
+
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
+{
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot,
+				   const struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				   const struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   const struct kvm_memory_slot *new,
+				   enum kvm_mr_change change)
+{
+	unsigned long npages = 0;
+	int i;
+
+	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+		  __func__, kvm, mem->slot, mem->guest_phys_addr,
+		  mem->memory_size, mem->userspace_addr);
+
+	/* Setup Guest PMAP table */
+	if (!kvm->arch.guest_pmap) {
+		if (mem->slot == 0)
+			npages = mem->memory_size >> PAGE_SHIFT;
+
+		if (npages) {
+			kvm->arch.guest_pmap_npages = npages;
+			kvm->arch.guest_pmap =
+			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+			if (!kvm->arch.guest_pmap) {
+				kvm_err("Failed to allocate guest PMAP");
+				return;
+			}
+
+			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+				  npages, kvm->arch.guest_pmap);
+
+			/* Now setup the page table */
+			for (i = 0; i < npages; i++)
+				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+		}
+	}
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	int err, size, offset;
+	void *gebase;
+	int i;
+
+	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+	if (!vcpu) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = kvm_vcpu_init(vcpu, kvm, id);
+
+	if (err)
+		goto out_free_cpu;
+
+	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+	/*
+	 * Allocate space for host mode exception handlers that handle
+	 * guest mode exits
+	 */
+	if (cpu_has_veic || cpu_has_vint)
+		size = 0x200 + VECTORSPACING * 64;
+	else
+		size = 0x4000;
+
+	/* Save Linux EBASE */
+	vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+	if (!gebase) {
+		err = -ENOMEM;
+		goto out_uninit_cpu;
+	}
+	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+		  ALIGN(size, PAGE_SIZE), gebase);
+
+	/* Save new ebase */
+	vcpu->arch.guest_ebase = gebase;
+
+	/* Copy L1 Guest Exception handler to correct offset */
+
+	/* TLB Refill, EXL = 0 */
+	memcpy(gebase, mips32_exception,
+	       mips32_exceptionEnd - mips32_exception);
+
+	/* General Exception Entry point */
+	memcpy(gebase + 0x180, mips32_exception,
+	       mips32_exceptionEnd - mips32_exception);
+
+	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
+	for (i = 0; i < 8; i++) {
+		kvm_debug("L1 Vectored handler @ %p\n",
+			  gebase + 0x200 + (i * VECTORSPACING));
+		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+		       mips32_exceptionEnd - mips32_exception);
+	}
+
+	/* General handler, relocate to unmapped space for sanity's sake */
+	offset = 0x2000;
+	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
+		  gebase + offset,
+		  mips32_GuestExceptionEnd - mips32_GuestException);
+
+	memcpy(gebase + offset, mips32_GuestException,
+	       mips32_GuestExceptionEnd - mips32_GuestException);
+
+#ifdef MODULE
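+	/* Module text runs from mapped space; copy the vcpu_run entry into unmapped gebase too */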
+	offset += mips32_GuestExceptionEnd - mips32_GuestException;
+	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+	vcpu->arch.vcpu_run = gebase + offset;
+#else
+	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
+	/* Invalidate the icache for these ranges */
+	flush_icache_range((unsigned long)gebase,
+			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+
+	/*
+	 * Allocate comm page for guest kernel, a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
+	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+	if (!vcpu->arch.kseg0_commpage) {
+		err = -ENOMEM;
+		goto out_free_gebase;
+	}
+
+	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+	kvm_mips_commpage_init(vcpu);
+
+	/* Init */
+	vcpu->arch.last_sched_cpu = -1;
+
+	/* Start off the timer */
+	kvm_mips_init_count(vcpu);
+
+	return vcpu;
+
+out_free_gebase:
+	kfree(gebase);
+
+out_uninit_cpu:
+	kvm_vcpu_uninit(vcpu);
+
+out_free_cpu:
+	kfree(vcpu);
+
+out:
+	return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+	kvm_vcpu_uninit(vcpu);
+
+	kvm_mips_dump_stats(vcpu);
+
+	kfree(vcpu->arch.guest_ebase);
+	kfree(vcpu->arch.kseg0_commpage);
+	kfree(vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int r = 0;
+	sigset_t sigsaved;
+
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+	if (vcpu->mmio_needed) {
+		if (!vcpu->mmio_is_write)
+			kvm_mips_complete_mmio_load(vcpu, run);
+		vcpu->mmio_needed = 0;
+	}
+
+	lose_fpu(1);
+
+	local_irq_disable();
+	/* Check if we have any exceptions/interrupts pending */
+	kvm_mips_deliver_interrupts(vcpu,
+				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+	__kvm_guest_enter();
+
+	/* Disable hardware page table walking while in guest */
+	htw_stop();
+
+	r = vcpu->arch.vcpu_run(run, vcpu);
+
+	/* Re-enable HTW before enabling interrupts */
+	htw_start();
+
+	__kvm_guest_exit();
+	local_irq_enable();
+
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+	return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+	struct kvm_vcpu *dvcpu = NULL;
+
+	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+			  (int)intr);
+
+	if (irq->cpu == -1)
+		dvcpu = vcpu;
+	else
+		dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+	if (intr == 2 || intr == 3 || intr == 4) {
+		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+	} else if (intr == -2 || intr == -3 || intr == -4) {
+		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+	} else {
+		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+			irq->cpu, irq->irq);
+		return -EINVAL;
+	}
+
+	dvcpu->arch.wait = 0;
+
+	if (waitqueue_active(&dvcpu->wq))
+		wake_up_interruptible(&dvcpu->wq);
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
+{
+	return -ENOIOCTLCMD;
+}
+
+static u64 kvm_mips_get_one_regs[] = {
+	KVM_REG_MIPS_R0,
+	KVM_REG_MIPS_R1,
+	KVM_REG_MIPS_R2,
+	KVM_REG_MIPS_R3,
+	KVM_REG_MIPS_R4,
+	KVM_REG_MIPS_R5,
+	KVM_REG_MIPS_R6,
+	KVM_REG_MIPS_R7,
+	KVM_REG_MIPS_R8,
+	KVM_REG_MIPS_R9,
+	KVM_REG_MIPS_R10,
+	KVM_REG_MIPS_R11,
+	KVM_REG_MIPS_R12,
+	KVM_REG_MIPS_R13,
+	KVM_REG_MIPS_R14,
+	KVM_REG_MIPS_R15,
+	KVM_REG_MIPS_R16,
+	KVM_REG_MIPS_R17,
+	KVM_REG_MIPS_R18,
+	KVM_REG_MIPS_R19,
+	KVM_REG_MIPS_R20,
+	KVM_REG_MIPS_R21,
+	KVM_REG_MIPS_R22,
+	KVM_REG_MIPS_R23,
+	KVM_REG_MIPS_R24,
+	KVM_REG_MIPS_R25,
+	KVM_REG_MIPS_R26,
+	KVM_REG_MIPS_R27,
+	KVM_REG_MIPS_R28,
+	KVM_REG_MIPS_R29,
+	KVM_REG_MIPS_R30,
+	KVM_REG_MIPS_R31,
+
+	KVM_REG_MIPS_HI,
+	KVM_REG_MIPS_LO,
+	KVM_REG_MIPS_PC,
+
+	KVM_REG_MIPS_CP0_INDEX,
+	KVM_REG_MIPS_CP0_CONTEXT,
+	KVM_REG_MIPS_CP0_USERLOCAL,
+	KVM_REG_MIPS_CP0_PAGEMASK,
+	KVM_REG_MIPS_CP0_WIRED,
+	KVM_REG_MIPS_CP0_HWRENA,
+	KVM_REG_MIPS_CP0_BADVADDR,
+	KVM_REG_MIPS_CP0_COUNT,
+	KVM_REG_MIPS_CP0_ENTRYHI,
+	KVM_REG_MIPS_CP0_COMPARE,
+	KVM_REG_MIPS_CP0_STATUS,
+	KVM_REG_MIPS_CP0_CAUSE,
+	KVM_REG_MIPS_CP0_EPC,
+	KVM_REG_MIPS_CP0_PRID,
+	KVM_REG_MIPS_CP0_CONFIG,
+	KVM_REG_MIPS_CP0_CONFIG1,
+	KVM_REG_MIPS_CP0_CONFIG2,
+	KVM_REG_MIPS_CP0_CONFIG3,
+	KVM_REG_MIPS_CP0_CONFIG4,
+	KVM_REG_MIPS_CP0_CONFIG5,
+	KVM_REG_MIPS_CP0_CONFIG7,
+	KVM_REG_MIPS_CP0_ERROREPC,
+
+	KVM_REG_MIPS_COUNT_CTL,
+	KVM_REG_MIPS_COUNT_RESUME,
+	KVM_REG_MIPS_COUNT_HZ,
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+			    const struct kvm_one_reg *reg)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+	int ret;
+	s64 v;
+	s64 vs[2];
+	unsigned int idx;
+
+	switch (reg->id) {
+	/* General purpose registers */
+	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+		break;
+	case KVM_REG_MIPS_HI:
+		v = (long)vcpu->arch.hi;
+		break;
+	case KVM_REG_MIPS_LO:
+		v = (long)vcpu->arch.lo;
+		break;
+	case KVM_REG_MIPS_PC:
+		v = (long)vcpu->arch.pc;
+		break;
+
+	/* Floating point registers */
+	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+		/* Odd singles in top of even double when FR=0 */
+		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+			v = get_fpr32(&fpu->fpr[idx], 0);
+		else
+			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+		break;
+	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+		/* Can't access odd doubles in FR=0 mode */
+		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		v = get_fpr64(&fpu->fpr[idx], 0);
+		break;
+	case KVM_REG_MIPS_FCR_IR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		v = boot_cpu_data.fpu_id;
+		break;
+	case KVM_REG_MIPS_FCR_CSR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		v = fpu->fcr31;
+		break;
+
+	/* MIPS SIMD Architecture (MSA) registers */
+	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		/* Can't access MSA registers in FR=0 mode */
+		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+		/* least significant byte first */
+		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+		/* most significant byte first */
+		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+		break;
+	case KVM_REG_MIPS_MSA_IR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		v = boot_cpu_data.msa_id;
+		break;
+	case KVM_REG_MIPS_MSA_CSR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		v = fpu->msacsr;
+		break;
+
+	/* Co-processor 0 registers */
+	case KVM_REG_MIPS_CP0_INDEX:
+		v = (long)kvm_read_c0_guest_index(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		v = (long)kvm_read_c0_guest_context(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_USERLOCAL:
+		v = (long)kvm_read_c0_guest_userlocal(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		v = (long)kvm_read_c0_guest_pagemask(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		v = (long)kvm_read_c0_guest_wired(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_HWRENA:
+		v = (long)kvm_read_c0_guest_hwrena(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		v = (long)kvm_read_c0_guest_badvaddr(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		v = (long)kvm_read_c0_guest_entryhi(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_COMPARE:
+		v = (long)kvm_read_c0_guest_compare(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		v = (long)kvm_read_c0_guest_status(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		v = (long)kvm_read_c0_guest_cause(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_EPC:
+		v = (long)kvm_read_c0_guest_epc(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_PRID:
+		v = (long)kvm_read_c0_guest_prid(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG:
+		v = (long)kvm_read_c0_guest_config(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG1:
+		v = (long)kvm_read_c0_guest_config1(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG2:
+		v = (long)kvm_read_c0_guest_config2(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG3:
+		v = (long)kvm_read_c0_guest_config3(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG4:
+		v = (long)kvm_read_c0_guest_config4(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG5:
+		v = (long)kvm_read_c0_guest_config5(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG7:
+		v = (long)kvm_read_c0_guest_config7(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		v = (long)kvm_read_c0_guest_errorepc(cop0);
+		break;
+	/* registers to be handled specially */
+	case KVM_REG_MIPS_CP0_COUNT:
+	case KVM_REG_MIPS_COUNT_CTL:
+	case KVM_REG_MIPS_COUNT_RESUME:
+	case KVM_REG_MIPS_COUNT_HZ:
+		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
+		if (ret)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+		return put_user(v, uaddr64);
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		u32 v32 = (u32)v;
+
+		return put_user(v32, uaddr32);
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+
+		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+			    const struct kvm_one_reg *reg)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+	s64 v;
+	s64 vs[2];
+	unsigned int idx;
+
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+		if (get_user(v, uaddr64) != 0)
+			return -EFAULT;
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		s32 v32;
+
+		if (get_user(v32, uaddr32) != 0)
+			return -EFAULT;
+		v = (s64)v32;
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+
+		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
+	} else {
+		return -EINVAL;
+	}
+
+	switch (reg->id) {
+	/* General purpose registers */
+	case KVM_REG_MIPS_R0:
+		/* Silently ignore requests to set $0 */
+		break;
+	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+		break;
+	case KVM_REG_MIPS_HI:
+		vcpu->arch.hi = v;
+		break;
+	case KVM_REG_MIPS_LO:
+		vcpu->arch.lo = v;
+		break;
+	case KVM_REG_MIPS_PC:
+		vcpu->arch.pc = v;
+		break;
+
+	/* Floating point registers */
+	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+		/* Odd singles in top of even double when FR=0 */
+		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+			set_fpr32(&fpu->fpr[idx], 0, v);
+		else
+			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+		break;
+	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+		/* Can't access odd doubles in FR=0 mode */
+		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		set_fpr64(&fpu->fpr[idx], 0, v);
+		break;
+	case KVM_REG_MIPS_FCR_IR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		/* Read-only */
+		break;
+	case KVM_REG_MIPS_FCR_CSR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		fpu->fcr31 = v;
+		break;
+
+	/* MIPS SIMD Architecture (MSA) registers */
+	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+		/* least significant byte first */
+		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+		/* most significant byte first */
+		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+		break;
+	case KVM_REG_MIPS_MSA_IR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		/* Read-only */
+		break;
+	case KVM_REG_MIPS_MSA_CSR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		fpu->msacsr = v;
+		break;
+
+	/* Co-processor 0 registers */
+	case KVM_REG_MIPS_CP0_INDEX:
+		kvm_write_c0_guest_index(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		kvm_write_c0_guest_context(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_USERLOCAL:
+		kvm_write_c0_guest_userlocal(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		kvm_write_c0_guest_pagemask(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		kvm_write_c0_guest_wired(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_HWRENA:
+		kvm_write_c0_guest_hwrena(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		kvm_write_c0_guest_badvaddr(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		kvm_write_c0_guest_entryhi(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		kvm_write_c0_guest_status(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_EPC:
+		kvm_write_c0_guest_epc(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_PRID:
+		kvm_write_c0_guest_prid(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		kvm_write_c0_guest_errorepc(cop0, v);
+		break;
+	/* registers to be handled specially */
+	case KVM_REG_MIPS_CP0_COUNT:
+	case KVM_REG_MIPS_CP0_COMPARE:
+	case KVM_REG_MIPS_CP0_CAUSE:
+	case KVM_REG_MIPS_CP0_CONFIG:
+	case KVM_REG_MIPS_CP0_CONFIG1:
+	case KVM_REG_MIPS_CP0_CONFIG2:
+	case KVM_REG_MIPS_CP0_CONFIG3:
+	case KVM_REG_MIPS_CP0_CONFIG4:
+	case KVM_REG_MIPS_CP0_CONFIG5:
+	case KVM_REG_MIPS_COUNT_CTL:
+	case KVM_REG_MIPS_COUNT_RESUME:
+	case KVM_REG_MIPS_COUNT_HZ:
+		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+				     struct kvm_enable_cap *cap)
+{
+	int r = 0;
+
+	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+		return -EINVAL;
+	if (cap->flags)
+		return -EINVAL;
+	if (cap->args[0])
+		return -EINVAL;
+
+	switch (cap->cap) {
+	case KVM_CAP_MIPS_FPU:
+		vcpu->arch.fpu_enabled = true;
+		break;
+	case KVM_CAP_MIPS_MSA:
+		vcpu->arch.msa_enabled = true;
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+	return r;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
+
+	switch (ioctl) {
+	case KVM_SET_ONE_REG:
+	case KVM_GET_ONE_REG: {
+		struct kvm_one_reg reg;
+
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		if (ioctl == KVM_SET_ONE_REG)
+			return kvm_mips_set_reg(vcpu, &reg);
+		else
+			return kvm_mips_get_reg(vcpu, &reg);
+	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		u64 __user *reg_dest;
+		struct kvm_reg_list reg_list;
+		unsigned n;
+
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			return -EFAULT;
+		n = reg_list.n;
+		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			return -EFAULT;
+		if (n < reg_list.n)
+			return -E2BIG;
+		reg_dest = user_list->reg;
+		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+				 sizeof(kvm_mips_get_one_regs)))
+			return -EFAULT;
+		return 0;
+	}
+	case KVM_NMI:
+		/* Treat the NMI as a CPU reset */
+		r = kvm_mips_reset_vcpu(vcpu);
+		break;
+	case KVM_INTERRUPT:
+		{
+			struct kvm_mips_interrupt irq;
+
+			r = -EFAULT;
+			if (copy_from_user(&irq, argp, sizeof(irq)))
+				goto out;
+
+			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+				  irq.irq);
+
+			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+			break;
+		}
+	case KVM_ENABLE_CAP: {
+		struct kvm_enable_cap cap;
+
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			goto out;
+		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+	}
+
+out:
+	return r;
+}
+
+/* Get (and clear) the dirty memory log for a memory slot. */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	unsigned long ga, ga_end;
+	int is_dirty = 0;
+	int r;
+	unsigned long n;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
+
+	/* If nothing is dirty, don't bother messing with page tables. */
+	if (is_dirty) {
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
+
+		ga = memslot->base_gfn << PAGE_SHIFT;
+		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+			 ga_end);
+
+		n = kvm_dirty_bitmap_bytes(memslot);
+		memset(memslot->dirty_bitmap, 0, n);
+	}
+
+	r = 0;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	long r;
+
+	switch (ioctl) {
+	default:
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	if (kvm_mips_callbacks) {
+		kvm_err("kvm: module already exists\n");
+		return -EEXIST;
+	}
+
+	return kvm_mips_emulation_init(&kvm_mips_callbacks);
+}
+
+void kvm_arch_exit(void)
+{
+	kvm_mips_callbacks = NULL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_ONE_REG:
+	case KVM_CAP_ENABLE_CAP:
+		r = 1;
+		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
+	case KVM_CAP_MIPS_FPU:
+		r = !!cpu_has_fpu;
+		break;
+	case KVM_CAP_MIPS_MSA:
+		/*
+		 * We don't support MSA vector partitioning yet:
+		 * 1) It would require explicit support which can't be tested
+		 *    yet due to lack of support in current hardware.
+		 * 2) It extends the state that would need to be saved/restored
+		 *    by e.g. QEMU for migration.
+		 *
+		 * When vector partitioning hardware becomes available, support
+		 * could be added by requiring a flag when enabling
+		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+		 * to save/restore the appropriate extra state.
+		 */
+		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+		break;
+	default:
+		r = 0;
+		break;
+	}
+	return r;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct mips_coproc *cop0;
+
+	if (!vcpu)
+		return -1;
+
+	kvm_debug("VCPU Register Dump:\n");
+	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+	for (i = 0; i < 32; i += 4) {
+		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+		       vcpu->arch.gprs[i],
+		       vcpu->arch.gprs[i + 1],
+		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+	}
+	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+	cop0 = vcpu->arch.cop0;
+	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+		  kvm_read_c0_guest_status(cop0),
+		  kvm_read_c0_guest_cause(cop0));
+
+	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		vcpu->arch.gprs[i] = regs->gpr[i];
+	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+	vcpu->arch.hi = regs->hi;
+	vcpu->arch.lo = regs->lo;
+	vcpu->arch.pc = regs->pc;
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		regs->gpr[i] = vcpu->arch.gprs[i];
+
+	regs->hi = vcpu->arch.hi;
+	regs->lo = vcpu->arch.lo;
+	regs->pc = vcpu->arch.pc;
+
+	return 0;
+}
+
+static void kvm_mips_comparecount_func(unsigned long data)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+	kvm_mips_callbacks->queue_timer_int(vcpu);
+
+	vcpu->arch.wait = 0;
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
+}
+
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+	kvm_mips_comparecount_func((unsigned long) vcpu);
+	return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	kvm_mips_callbacks->vcpu_init(vcpu);
+	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
+{
+	return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static void kvm_mips_set_c0_status(void)
+{
+	uint32_t status = read_c0_status();
+
+	if (cpu_has_dsp)
+		status |= (ST0_MX);
+
+	write_c0_status(status);
+	ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	uint32_t cause = vcpu->arch.host_cp0_cause;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	/* re-enable HTW before enabling interrupts */
+	htw_start();
+
+	/* Set a default exit reason */
+	run->exit_reason = KVM_EXIT_UNKNOWN;
+	run->ready_for_interrupt_injection = 1;
+
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
+	kvm_mips_set_c0_status();
+
+	local_irq_enable();
+
+	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+			cause, opc, run, vcpu);
+
+	/*
+	 * Do a privilege check; if in UM, most of these exit conditions end up
+	 * causing an exception to be delivered to the Guest Kernel
+	 */
+	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+	if (er == EMULATE_PRIV_FAIL) {
+		goto skip_emul;
+	} else if (er == EMULATE_FAIL) {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		goto skip_emul;
+	}
+
+	switch (exccode) {
+	case T_INT:
+		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+		++vcpu->stat.int_exits;
+		trace_kvm_exit(vcpu, INT_EXITS);
+
+		if (need_resched())
+			cond_resched();
+
+		ret = RESUME_GUEST;
+		break;
+
+	case T_COP_UNUSABLE:
+		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+		++vcpu->stat.cop_unusable_exits;
+		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+		/* XXXKYMA: Might need to return to user space */
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
+			ret = RESUME_HOST;
+		break;
+
+	case T_TLB_MOD:
+		++vcpu->stat.tlbmod_exits;
+		trace_kvm_exit(vcpu, TLBMOD_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+		break;
+
+	case T_TLB_ST_MISS:
+		kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);
+
+		++vcpu->stat.tlbmiss_st_exits;
+		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+		break;
+
+	case T_TLB_LD_MISS:
+		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
+
+		++vcpu->stat.tlbmiss_ld_exits;
+		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+		break;
+
+	case T_ADDR_ERR_ST:
+		++vcpu->stat.addrerr_st_exits;
+		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+		break;
+
+	case T_ADDR_ERR_LD:
+		++vcpu->stat.addrerr_ld_exits;
+		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+		break;
+
+	case T_SYSCALL:
+		++vcpu->stat.syscall_exits;
+		trace_kvm_exit(vcpu, SYSCALL_EXITS);
+		ret = kvm_mips_callbacks->handle_syscall(vcpu);
+		break;
+
+	case T_RES_INST:
+		++vcpu->stat.resvd_inst_exits;
+		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+		break;
+
+	case T_BREAK:
+		++vcpu->stat.break_inst_exits;
+		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_break(vcpu);
+		break;
+
+	case T_TRAP:
+		++vcpu->stat.trap_inst_exits;
+		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_trap(vcpu);
+		break;
+
+	case T_MSAFPE:
+		++vcpu->stat.msa_fpe_exits;
+		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+		break;
+
+	case T_FPE:
+		++vcpu->stat.fpe_exits;
+		trace_kvm_exit(vcpu, FPE_EXITS);
+		ret = kvm_mips_callbacks->handle_fpe(vcpu);
+		break;
+
+	case T_MSADIS:
+		++vcpu->stat.msa_disabled_exits;
+		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
+		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+		break;
+
+	default:
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		break;
+
+	}
+
+skip_emul:
+	local_irq_disable();
+
+	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+		kvm_mips_deliver_interrupts(vcpu, cause);
+
+	if (!(ret & RESUME_HOST)) {
+		/* Only check for signals if not already exiting to userspace */
+		if (signal_pending(current)) {
+			run->exit_reason = KVM_EXIT_INTR;
+			ret = (-EINTR << 2) | RESUME_HOST;
+			++vcpu->stat.signal_exits;
+			trace_kvm_exit(vcpu, SIGNAL_EXITS);
+		}
+	}
+
+	if (ret == RESUME_GUEST) {
+		/*
+		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+		 * is live), restore FCR31 / MSACSR.
+		 *
+		 * This should be before returning to the guest exception
+		 * vector, as it may well cause an [MSA] FP exception if there
+		 * are pending exception bits unmasked. (see
+		 * kvm_mips_csr_die_notifier() for how that is handled).
+		 */
+		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+		    read_c0_status() & ST0_CU1)
+			__kvm_restore_fcsr(&vcpu->arch);
+
+		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+		    read_c0_config5() & MIPS_CONF5_MSAEN)
+			__kvm_restore_msacsr(&vcpu->arch);
+	}
+
+	/* Disable HTW before returning to guest or host */
+	htw_stop();
+
+	return ret;
+}
+
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned int sr, cfg5;
+
+	preempt_disable();
+
+	sr = kvm_read_c0_guest_status(cop0);
+
+	/*
+	 * If MSA state is already live, it is undefined how it interacts with
+	 * FR=0 FPU state, and we don't want to hit reserved instruction
+	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+	 * play it safe and save it first.
+	 *
+	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+	 * get called when guest CU1 is set, however we can't trust the guest
+	 * not to clobber the status register directly via the commpage.
+	 */
+	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+		kvm_lose_fpu(vcpu);
+
+	/*
+	 * Enable FPU for guest
+	 * We set FR and FRE according to guest context
+	 */
+	change_c0_status(ST0_CU1 | ST0_FR, sr);
+	if (cpu_has_fre) {
+		cfg5 = kvm_read_c0_guest_config5(cop0);
+		change_c0_config5(MIPS_CONF5_FRE, cfg5);
+	}
+	enable_fpu_hazard();
+
+	/* If guest FPU state not active, restore it now */
+	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+		__kvm_restore_fpu(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+	}
+
+	preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned int sr, cfg5;
+
+	preempt_disable();
+
+	/*
+	 * Enable FPU if enabled in guest, since we're restoring FPU context
+	 * anyway. We set FR and FRE according to guest context.
+	 */
+	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+		sr = kvm_read_c0_guest_status(cop0);
+
+		/*
+		 * If FR=0 FPU state is already live, it is undefined how it
+		 * interacts with MSA state, so play it safe and save it first.
+		 */
+		if (!(sr & ST0_FR) &&
+		    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+				KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+			kvm_lose_fpu(vcpu);
+
+		change_c0_status(ST0_CU1 | ST0_FR, sr);
+		if (sr & ST0_CU1 && cpu_has_fre) {
+			cfg5 = kvm_read_c0_guest_config5(cop0);
+			change_c0_config5(MIPS_CONF5_FRE, cfg5);
+		}
+	}
+
+	/* Enable MSA for guest */
+	set_c0_config5(MIPS_CONF5_MSAEN);
+	enable_fpu_hazard();
+
+	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+	case KVM_MIPS_FPU_FPU:
+		/*
+		 * Guest FPU state already loaded, only restore upper MSA state
+		 */
+		__kvm_restore_msa_upper(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		break;
+	case 0:
+		/* Neither FPU nor MSA already active, restore full MSA state */
+		__kvm_restore_msa(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		if (kvm_mips_guest_has_fpu(&vcpu->arch))
+			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+		break;
+	default:
+		break;
+	}
+
+	preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		disable_msa();
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+	}
+	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		clear_c0_status(ST0_CU1 | ST0_FR);
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+	}
+	preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * FPU & MSA get disabled in root context (hardware) when it is disabled
+	 * in guest context (software), but the register state in the hardware
+	 * may still be in use. This is why we explicitly re-enable the hardware
+	 * before saving.
+	 */
+
+	preempt_disable();
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		set_c0_config5(MIPS_CONF5_MSAEN);
+		enable_fpu_hazard();
+
+		__kvm_save_msa(&vcpu->arch);
+
+		/* Disable MSA & FPU */
+		disable_msa();
+		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+			clear_c0_status(ST0_CU1 | ST0_FR);
+		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		set_c0_status(ST0_CU1);
+		enable_fpu_hazard();
+
+		__kvm_save_fpu(&vcpu->arch);
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+
+		/* Disable FPU */
+		clear_c0_status(ST0_CU1 | ST0_FR);
+	}
+	preempt_enable();
+}
+
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+				   unsigned long cmd, void *ptr)
+{
+	struct die_args *args = (struct die_args *)ptr;
+	struct pt_regs *regs = args->regs;
+	unsigned long pc;
+
+	/* Only interested in FPE and MSAFPE */
+	if (cmd != DIE_FP && cmd != DIE_MSAFP)
+		return NOTIFY_DONE;
+
+	/* Return immediately if guest context isn't active */
+	if (!(current->flags & PF_VCPU))
+		return NOTIFY_DONE;
+
+	/* Should never get here from user mode */
+	BUG_ON(user_mode(regs));
+
+	pc = instruction_pointer(regs);
+	switch (cmd) {
+	case DIE_FP:
+		/* match 2nd instruction in __kvm_restore_fcsr */
+		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+			return NOTIFY_DONE;
+		break;
+	case DIE_MSAFP:
+		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
+		if (!cpu_has_msa ||
+		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
+			return NOTIFY_DONE;
+		break;
+	}
+
+	/* Move PC forward a little and continue executing */
+	instruction_pointer(regs) += 4;
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+	.notifier_call = kvm_mips_csr_die_notify,
+};
+
+int __init kvm_mips_init(void)
+{
+	int ret;
+
+	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+	if (ret)
+		return ret;
+
+	register_die_notifier(&kvm_mips_csr_die_notifier);
+
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the KVM module, which are only
+	 * available once the module is loaded.
+	 */
+	kvm_mips_gfn_to_pfn = gfn_to_pfn;
+	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+	kvm_mips_is_error_pfn = is_error_pfn;
+
+	return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+	kvm_exit();
+
+	kvm_mips_gfn_to_pfn = NULL;
+	kvm_mips_release_pfn_clean = NULL;
+	kvm_mips_is_error_pfn = NULL;
+
+	unregister_die_notifier(&kvm_mips_csr_die_notifier);
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 0000000..d02f0c6
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+	.set	noreorder
+	.set	noat
+
+LEAF(__kvm_save_msa)
+	st_d	0,  VCPU_FPR0,  a0
+	st_d	1,  VCPU_FPR1,  a0
+	st_d	2,  VCPU_FPR2,  a0
+	st_d	3,  VCPU_FPR3,  a0
+	st_d	4,  VCPU_FPR4,  a0
+	st_d	5,  VCPU_FPR5,  a0
+	st_d	6,  VCPU_FPR6,  a0
+	st_d	7,  VCPU_FPR7,  a0
+	st_d	8,  VCPU_FPR8,  a0
+	st_d	9,  VCPU_FPR9,  a0
+	st_d	10, VCPU_FPR10, a0
+	st_d	11, VCPU_FPR11, a0
+	st_d	12, VCPU_FPR12, a0
+	st_d	13, VCPU_FPR13, a0
+	st_d	14, VCPU_FPR14, a0
+	st_d	15, VCPU_FPR15, a0
+	st_d	16, VCPU_FPR16, a0
+	st_d	17, VCPU_FPR17, a0
+	st_d	18, VCPU_FPR18, a0
+	st_d	19, VCPU_FPR19, a0
+	st_d	20, VCPU_FPR20, a0
+	st_d	21, VCPU_FPR21, a0
+	st_d	22, VCPU_FPR22, a0
+	st_d	23, VCPU_FPR23, a0
+	st_d	24, VCPU_FPR24, a0
+	st_d	25, VCPU_FPR25, a0
+	st_d	26, VCPU_FPR26, a0
+	st_d	27, VCPU_FPR27, a0
+	st_d	28, VCPU_FPR28, a0
+	st_d	29, VCPU_FPR29, a0
+	st_d	30, VCPU_FPR30, a0
+	st_d	31, VCPU_FPR31, a0
+	jr	ra
+	 nop
+	END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+	ld_d	0,  VCPU_FPR0,  a0
+	ld_d	1,  VCPU_FPR1,  a0
+	ld_d	2,  VCPU_FPR2,  a0
+	ld_d	3,  VCPU_FPR3,  a0
+	ld_d	4,  VCPU_FPR4,  a0
+	ld_d	5,  VCPU_FPR5,  a0
+	ld_d	6,  VCPU_FPR6,  a0
+	ld_d	7,  VCPU_FPR7,  a0
+	ld_d	8,  VCPU_FPR8,  a0
+	ld_d	9,  VCPU_FPR9,  a0
+	ld_d	10, VCPU_FPR10, a0
+	ld_d	11, VCPU_FPR11, a0
+	ld_d	12, VCPU_FPR12, a0
+	ld_d	13, VCPU_FPR13, a0
+	ld_d	14, VCPU_FPR14, a0
+	ld_d	15, VCPU_FPR15, a0
+	ld_d	16, VCPU_FPR16, a0
+	ld_d	17, VCPU_FPR17, a0
+	ld_d	18, VCPU_FPR18, a0
+	ld_d	19, VCPU_FPR19, a0
+	ld_d	20, VCPU_FPR20, a0
+	ld_d	21, VCPU_FPR21, a0
+	ld_d	22, VCPU_FPR22, a0
+	ld_d	23, VCPU_FPR23, a0
+	ld_d	24, VCPU_FPR24, a0
+	ld_d	25, VCPU_FPR25, a0
+	ld_d	26, VCPU_FPR26, a0
+	ld_d	27, VCPU_FPR27, a0
+	ld_d	28, VCPU_FPR28, a0
+	ld_d	29, VCPU_FPR29, a0
+	ld_d	30, VCPU_FPR30, a0
+	ld_d	31, VCPU_FPR31, a0
+	jr	ra
+	 nop
+	END(__kvm_restore_msa)
+
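+	/* Restore the upper 64 bits of MSA vector register \wr from \off(\base) */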
+	.macro	kvm_restore_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	ld	$1, \off(\base)
+	insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	lw	$1, \off(\base)
+	insert_w \wr, 2
+	lw	$1, (\off+4)(\base)
+	insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	lw	$1, (\off+4)(\base)
+	insert_w \wr, 2
+	lw	$1, \off(\base)
+	insert_w \wr, 3
+#endif
+	.set	pop
+	.endm
+
+LEAF(__kvm_restore_msa_upper)
+	kvm_restore_msa_upper	0,  VCPU_FPR0 +8, a0
+	kvm_restore_msa_upper	1,  VCPU_FPR1 +8, a0
+	kvm_restore_msa_upper	2,  VCPU_FPR2 +8, a0
+	kvm_restore_msa_upper	3,  VCPU_FPR3 +8, a0
+	kvm_restore_msa_upper	4,  VCPU_FPR4 +8, a0
+	kvm_restore_msa_upper	5,  VCPU_FPR5 +8, a0
+	kvm_restore_msa_upper	6,  VCPU_FPR6 +8, a0
+	kvm_restore_msa_upper	7,  VCPU_FPR7 +8, a0
+	kvm_restore_msa_upper	8,  VCPU_FPR8 +8, a0
+	kvm_restore_msa_upper	9,  VCPU_FPR9 +8, a0
+	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
+	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
+	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
+	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
+	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
+	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
+	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
+	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
+	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
+	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
+	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
+	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
+	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
+	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
+	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
+	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
+	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
+	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
+	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
+	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
+	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
+	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
+	jr	ra
+	 nop
+	END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+	lw	t0, VCPU_MSA_CSR(a0)
+	/*
+	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+	 * See kvm_mips_csr_die_notify() which handles t0 containing a value
+	 * which triggers an MSA FP Exception, which must be stepped over and
+	 * ignored since the set cause bits must remain there for the guest.
+	 */
+	_ctcmsa	MSA_CSR, t0
+	jr	ra
+	 nop
+	END(__kvm_restore_msacsr)
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
new file mode 100644
index 0000000..03a6ae8
--- /dev/null
+++ b/arch/mips/kvm/opcode.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/* Define opcode values not defined in <asm/inst.h> */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op	0x0b	/* 01011 */
+#define wrpgpr_op	0x0e	/* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op		0x20	/* 100000 */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
new file mode 100644
index 0000000..888bb67
--- /dev/null
+++ b/arch/mips/kvm/stats.c
@@ -0,0 +1,84 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+	"WAIT",
+	"CACHE",
+	"Signal",
+	"Interrupt",
+	"COP0/1 Unusable",
+	"TLB Mod",
+	"TLB Miss (LD)",
+	"TLB Miss (ST)",
+	"Address Err (ST)",
+	"Address Error (LD)",
+	"System Call",
+	"Reserved Inst",
+	"Break Inst",
+	"Trap Inst",
+	"MSA FPE",
+	"FPE",
+	"MSA Disabled",
+	"D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+	"Index",
+	"Random",
+	"EntryLo0",
+	"EntryLo1",
+	"Context",
+	"PG Mask",
+	"Wired",
+	"HWREna",
+	"BadVAddr",
+	"Count",
+	"EntryHI",
+	"Compare",
+	"Status",
+	"Cause",
+	"EXC PC",
+	"PRID",
+	"Config",
+	"LLAddr",
+	"Watch Lo",
+	"Watch Hi",
+	"X Context",
+	"Reserved",
+	"Impl Dep",
+	"Debug",
+	"DEPC",
+	"PerfCnt",
+	"ErrCtl",
+	"CacheErr",
+	"TagLo",
+	"TagHi",
+	"ErrorEPC",
+	"DESAVE"
+};
+
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+	int i, j;
+
+	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+			if (vcpu->arch.cop0->stat[i][j])
+				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+					 vcpu->arch.cop0->stat[i][j]);
+		}
+	}
+#endif
+}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
new file mode 100644
index 0000000..eff71c7
--- /dev/null
+++ b/arch/mips/kvm/tlb.c
@@ -0,0 +1,836 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlb.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+}
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.commpage_tlb;
+}
+
+/* Structure defining a TLB entry data set. */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+	unsigned long old_entryhi;
+	unsigned long old_pagemask;
+	struct kvm_mips_tlb tlb;
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+
+	kvm_info("HOST TLBs:\n");
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+
+	for (i = 0; i < current_cpu_data.tlbsize; i++) {
+		write_c0_index(i);
+		mtc0_tlbw_hazard();
+
+		tlb_read();
+		tlbw_use_hazard();
+
+		tlb.tlb_hi = read_c0_entryhi();
+		tlb.tlb_lo0 = read_c0_entrylo0();
+		tlb.tlb_lo1 = read_c0_entrylo1();
+		tlb.tlb_mask = read_c0_pagemask();
+
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
+			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+			 i, tlb.tlb_hi);
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo0 >> 3) & 7);
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+	}
+	write_c0_entryhi(old_entryhi);
+	write_c0_pagemask(old_pagemask);
+	mtc0_tlbw_hazard();
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb tlb;
+	int i;
+
+	kvm_info("Guest TLBs:\n");
+	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+		tlb = vcpu->arch.guest_tlb[i];
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
+			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+			 i, tlb.tlb_hi);
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo0 >> 3) & 7);
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+	}
+}
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+	int srcu_idx, err = 0;
+	pfn_t pfn;
+
+	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+		return 0;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+	if (is_error_noslot_pfn(pfn)) {
+		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+		err = -EFAULT;
+		goto out;
+	}
+
+	kvm->arch.guest_pmap[gfn] = pfn;
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+						    unsigned long gva)
+{
+	gfn_t gfn;
+	uint32_t offset = gva & ~PAGE_MASK;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+			__builtin_return_address(0), gva);
+		return KVM_INVALID_PAGE;
+	}
+
+	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+			gva);
+		return KVM_INVALID_PAGE;
+	}
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return KVM_INVALID_ADDR;
+
+	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+
+/*
+ * XXXKYMA: Must be called with interrupts disabled.
+ * Set flush_dcache_mask == 0 if no dcache flush is required.
+ */
+int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+			    unsigned long entrylo0, unsigned long entrylo1,
+			    int flush_dcache_mask)
+{
+	unsigned long flags;
+	unsigned long old_entryhi;
+	int idx;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	write_c0_entryhi(entryhi);
+	mtc0_tlbw_hazard();
+
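+	/*
+	 * Probe for an existing entry matching this EntryHi: if one is found
+	 * it is updated in place (indexed write), otherwise a random slot is
+	 * used, so the same guest mapping never occupies two host TLB entries.
+	 */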
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	if (idx > current_cpu_data.tlbsize) {
+		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+		kvm_mips_dump_host_tlbs();
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+
+	if (idx < 0)
+		tlb_write_random();
+	else
+		tlb_write_indexed();
+	tlbw_use_hazard();
+
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, idx, read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());
+
+	/* Flush D-cache */
+	if (flush_dcache_mask) {
+		if (entrylo0 & MIPS3_PG_V) {
+			++vcpu->stat.flush_dcache_exits;
+			flush_data_cache_page((entryhi & VPN2_MASK) &
+					      ~flush_dcache_mask);
+		}
+		if (entrylo1 & MIPS3_PG_V) {
+			++vcpu->stat.flush_dcache_exits;
+			flush_data_cache_page(((entryhi & VPN2_MASK) &
+					       ~flush_dcache_mask) |
+					      (0x1 << PAGE_SHIFT));
+		}
+	}
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+	local_irq_restore(flags);
+	return 0;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+				    struct kvm_vcpu *vcpu)
+{
+	gfn_t gfn;
+	pfn_t pfn0, pfn1;
+	unsigned long vaddr = 0;
+	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+	int even;
+	struct kvm *kvm = vcpu->kvm;
+	const int flush_dcache_mask = 0;
+
+	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		return -1;
+	}
+
+	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+			gfn, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		return -1;
+	}
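+	/*
+	 * A TLB entry covers an even/odd pair of virtual pages, so map both
+	 * gfns of the pair and order the pfns below according to which half
+	 * the faulting gfn belongs to.
+	 */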
+	even = !(gfn & 0x1);
+	vaddr = badvaddr & (PAGE_MASK << 1);
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return -1;
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+		return -1;
+
+	if (even) {
+		pfn0 = kvm->arch.guest_pmap[gfn];
+		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+	} else {
+		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+		pfn1 = kvm->arch.guest_pmap[gfn];
+	}
+
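+	/*
+	 * Construct the EntryLo values: bits 5:3 select cache attribute 3
+	 * (cacheable, noncoherent, write-back), bit 2 is D (dirty/writable)
+	 * and bit 1 is V (valid).
+	 */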
+	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
+	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
+
+	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				       flush_dcache_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+
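+/*
+ * Install a host TLB entry for the vcpu's kseg0 commpage at the index
+ * returned by kvm_mips_get_commpage_asid(); the commpage pfn is placed in
+ * EntryLo0 and EntryLo1 is left invalid.
+ */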
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+	struct kvm_vcpu *vcpu)
+{
+	pfn_t pfn0, pfn1;
+	unsigned long flags, old_entryhi = 0, vaddr = 0;
+	unsigned long entrylo0 = 0, entrylo1 = 0;
+
+	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+	pfn1 = 0;
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
+	entrylo1 = 0;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	vaddr = badvaddr & (PAGE_MASK << 1);
+	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+	mtc0_tlbw_hazard();
+	write_c0_entrylo0(entrylo0);
+	mtc0_tlbw_hazard();
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+	local_irq_restore(flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+					 struct kvm_mips_tlb *tlb,
+					 unsigned long *hpa0,
+					 unsigned long *hpa1)
+{
+	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+	struct kvm *kvm = vcpu->kvm;
+	pfn_t pfn0, pfn1;
+	gfn_t gfn0, gfn1;
+	long tlb_lo[2];
+
+	tlb_lo[0] = tlb->tlb_lo0;
+	tlb_lo[1] = tlb->tlb_lo1;
+
+	/*
+	 * The commpage address must not be mapped to anything else if the guest
+	 * TLB contains entries nearby, or commpage accesses will break.
+	 */
+	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+			VPN2_MASK & (PAGE_MASK << 1)))
+		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+	if (gfn0 >= kvm->arch.guest_pmap_npages ||
+	    gfn1 >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+			__func__, gfn0, gfn1, tlb->tlb_hi);
+		kvm_mips_dump_guest_tlbs(vcpu);
+		return -1;
+	}
+
+	if (kvm_mips_map_page(kvm, gfn0) < 0)
+		return -1;
+
+	if (kvm_mips_map_page(kvm, gfn1) < 0)
+		return -1;
+
+	pfn0 = kvm->arch.guest_pmap[gfn0];
+	pfn1 = kvm->arch.guest_pmap[gfn1];
+
+	if (hpa0)
+		*hpa0 = pfn0 << PAGE_SHIFT;
+
+	if (hpa1)
+		*hpa1 = pfn1 << PAGE_SHIFT;
+
+	/* Get attributes from the Guest TLB */
+	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+					       kvm_mips_get_kernel_asid(vcpu) :
+					       kvm_mips_get_user_asid(vcpu));
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+		   (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+		   (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+
+	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+		  tlb->tlb_lo0, tlb->tlb_lo1);
+
+	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				       tlb->tlb_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+	int i;
+	int index = -1;
+	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
+		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
+			index = i;
+			break;
+		}
+	}
+
+	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+		  __func__, entryhi, index,
+		  index >= 0 ? tlb[index].tlb_lo0 : 0UL,
+		  index >= 0 ? tlb[index].tlb_lo1 : 0UL);
+
+	return index;
+}
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+	unsigned long old_entryhi, flags;
+	int idx;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+
+	if (KVM_GUEST_KERNEL_MODE(vcpu))
+		write_c0_entryhi((vaddr & VPN2_MASK) |
+				 kvm_mips_get_kernel_asid(vcpu));
+	else {
+		write_c0_entryhi((vaddr & VPN2_MASK) |
+				 kvm_mips_get_user_asid(vcpu));
+	}
+
+	mtc0_tlbw_hazard();
+
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+
+	return idx;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+	int idx;
+	unsigned long flags, old_entryhi;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+
+	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+	mtc0_tlbw_hazard();
+
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	if (idx >= current_cpu_data.tlbsize)
+		BUG();
+
+	if (idx >= 0) {
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+		mtc0_tlbw_hazard();
+
+		write_c0_entrylo0(0);
+		mtc0_tlbw_hazard();
+
+		write_c0_entrylo1(0);
+		mtc0_tlbw_hazard();
+
+		tlb_write_indexed();
+		mtc0_tlbw_hazard();
+	}
+
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+	if (idx >= 0)
+		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
+
+	return 0;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+
+/* XXXKYMA: Fix: Guest USER and KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+	unsigned long flags, old_entryhi;
+
+	if (index >= current_cpu_data.tlbsize)
+		BUG();
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+
+	write_c0_entryhi(UNIQUE_ENTRYHI(index));
+	mtc0_tlbw_hazard();
+
+	write_c0_index(index);
+	mtc0_tlbw_hazard();
+
+	write_c0_entrylo0(0);
+	mtc0_tlbw_hazard();
+
+	write_c0_entrylo1(0);
+	mtc0_tlbw_hazard();
+
+	tlb_write_indexed();
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+	unsigned long flags;
+	unsigned long old_entryhi, entryhi;
+	unsigned long old_pagemask;
+	int entry = 0;
+	int maxentry = current_cpu_data.tlbsize;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+
+	/* Blast 'em all away. */
+	for (entry = 0; entry < maxentry; entry++) {
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+
+		if (skip_kseg0) {
+			tlb_read();
+			tlbw_use_hazard();
+
+			entryhi = read_c0_entryhi();
+
+			/* Don't blow away guest kernel entries */
+			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
+				continue;
+		}
+
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+		mtc0_tlbw_hazard();
+		write_c0_entrylo0(0);
+		mtc0_tlbw_hazard();
+		write_c0_entrylo1(0);
+		mtc0_tlbw_hazard();
+
+		tlb_write_indexed();
+		mtc0_tlbw_hazard();
+	}
+
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_entryhi);
+	write_c0_pagemask(old_pagemask);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+
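+/*
+ * ASIDs carry a generation (version) number in the bits above ASID_MASK;
+ * when the low ASID bits wrap, the whole host TLB is flushed and a new
+ * generation begins so stale translations from the previous cycle cannot
+ * be reused.
+ */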
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+			     struct kvm_vcpu *vcpu)
+{
+	unsigned long asid = asid_cache(cpu);
+
+	asid += ASID_INC;
+	if (!(asid & ASID_MASK)) {
+		if (cpu_has_vtag_icache)
+			flush_icache_all();
+
+		kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+		if (!asid)      /* fix version if needed */
+			asid = ASID_FIRST_VERSION;
+	}
+
+	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+void kvm_local_flush_tlb_all(void)
+{
+	unsigned long flags;
+	unsigned long old_ctx;
+	int entry = 0;
+
+	local_irq_save(flags);
+	/* Save old context and create impossible VPN2 value */
+	old_ctx = read_c0_entryhi();
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	/* Blast 'em all away. */
+	while (entry < current_cpu_data.tlbsize) {
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		entry++;
+	}
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	mtc0_tlbw_hazard();
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu:	Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+		hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	unsigned long flags;
+	int newasid = 0;
+
+	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+	/* Allocate new kernel and user ASIDs if needed */
+
+	local_irq_save(flags);
+
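+	/*
+	 * If the cached guest kernel ASID is from an older generation than
+	 * this CPU's ASID cache, allocate fresh kernel and user ASIDs for the
+	 * guest on this CPU.
+	 */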
+	if (((vcpu->arch.
+	      guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+		vcpu->arch.guest_kernel_asid[cpu] =
+		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+		vcpu->arch.guest_user_asid[cpu] =
+		    vcpu->arch.guest_user_mm.context.asid[cpu];
+		newasid++;
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+			  vcpu->arch.guest_user_asid[cpu]);
+	}
+
+	if (vcpu->arch.last_sched_cpu != cpu) {
+		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		/*
+		 * Migrate the timer interrupt to the current CPU so that it
+		 * always interrupts the guest and synchronously triggers a
+		 * guest timer interrupt.
+		 */
+		kvm_mips_migrate_count(vcpu);
+	}
+
+	if (!newasid) {
+		/*
+		 * If we preempted while the guest was executing, then reload
+		 * the pre-empted ASID
+		 */
+		if (current->flags & PF_VCPU) {
+			write_c0_entryhi(vcpu->arch.
+					 preempt_entryhi & ASID_MASK);
+			ehb();
+		}
+	} else {
+		/* New ASIDs were allocated for the VM */
+
+		/*
+		 * Were we in guest context? If so then the pre-empted ASID is
+		 * no longer valid; we need to set it to what it should be based
+		 * on the mode of the Guest (Kernel/User)
+		 */
+		if (current->flags & PF_VCPU) {
+			if (KVM_GUEST_KERNEL_MODE(vcpu))
+				write_c0_entryhi(vcpu->arch.
+						 guest_kernel_asid[cpu] &
+						 ASID_MASK);
+			else
+				write_c0_entryhi(vcpu->arch.
+						 guest_user_asid[cpu] &
+						 ASID_MASK);
+			ehb();
+		}
+	}
+
+	/* restore guest state to registers */
+	kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
+	local_irq_restore(flags);
+
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags;
+	uint32_t cpu;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+
+	vcpu->arch.preempt_entryhi = read_c0_entryhi();
+	vcpu->arch.last_sched_cpu = cpu;
+
+	/* save guest state in registers */
+	kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
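+	/*
+	 * The host process's ASID on this CPU may be from a stale generation
+	 * (another task ran while we were preempted); if so, drop its MMU
+	 * context before restoring the host EntryHi below.
+	 */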
+	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+	     ASID_VERSION_MASK)) {
+		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+			  cpu_context(cpu, current->mm));
+		drop_mmu_context(current->mm, cpu);
+	}
+	write_c0_entryhi(cpu_asid(cpu, current->mm));
+	ehb();
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
+
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long paddr, flags, vpn2, asid;
+	uint32_t inst;
+	int index;
+
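+	/*
+	 * Three cases: a guest mapped address is fetched through the host TLB
+	 * (refilling it from the guest TLB if needed), a guest KSEG0 address
+	 * is translated to a host physical address and read via CKSEG0, and
+	 * anything else is rejected as KVM_INVALID_INST.
+	 */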
+	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+		if (index >= 0) {
+			inst = *(opc);
+		} else {
+			vpn2 = (unsigned long) opc & VPN2_MASK;
+			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+			if (index < 0) {
+				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+					__func__, opc, vcpu, read_c0_entryhi());
+				kvm_mips_dump_host_tlbs();
+				local_irq_restore(flags);
+				return KVM_INVALID_INST;
+			}
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+						&vcpu->arch.guest_tlb[index],
+						NULL, NULL)) {
+				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+					__func__, opc, index, vcpu,
+					read_c0_entryhi());
+				kvm_mips_dump_guest_tlbs(vcpu);
+				local_irq_restore(flags);
+				return KVM_INVALID_INST;
+			}
+			inst = *(opc);
+		}
+		local_irq_restore(flags);
+	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		paddr =
+		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+							  (unsigned long) opc);
+		inst = *(uint32_t *) CKSEG0ADDR(paddr);
+	} else {
+		kvm_err("%s: illegal address: %p\n", __func__, opc);
+		return KVM_INVALID_INST;
+	}
+
+	return inst;
+}
+EXPORT_SYMBOL(kvm_get_inst);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 0000000..bd6437f
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,44 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/* Tracepoints for VM exits */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
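+/*
+ * TRACE_EVENT(kvm_exit, ...) below generates a trace_kvm_exit(vcpu, reason)
+ * helper that the exit path can call to record the guest PC and the exit
+ * reason index into kvm_mips_exit_types_str[].
+ */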
+TRACE_EVENT(kvm_exit,
+	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+	    TP_ARGS(vcpu, reason),
+	    TP_STRUCT__entry(
+			__field(unsigned long, pc)
+			__field(unsigned int, reason)
+	    ),
+
+	    TP_fast_assign(
+			__entry->pc = vcpu->arch.pc;
+			__entry->reason = reason;
+	    ),
+
+	    TP_printk("[%s]PC: 0x%08lx",
+		      kvm_mips_exit_types_str[__entry->reason],
+		      __entry->pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
new file mode 100644
index 0000000..307cc4c
--- /dev/null
+++ b/arch/mips/kvm/trap_emul.c
@@ -0,0 +1,673 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "opcode.h"
+#include "interrupt.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+	gpa_t gpa;
+	uint32_t kseg = KSEGX(gva);
+
+	if ((kseg == CKSEG0) || (kseg == CKSEG1))
+		gpa = CPHYSADDR(gva);
+	else {
+		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+		kvm_mips_dump_host_tlbs();
+		gpa = KVM_INVALID_ADDR;
+	}
+
+	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+
+	return gpa;
+}
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+		/* FPU Unusable */
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
+			/*
+			 * Unusable/no FPU in guest:
+			 * deliver guest COP1 Unusable Exception
+			 */
+			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+		} else {
+			/* Restore FPU state */
+			kvm_own_fpu(vcpu);
+			er = EMULATE_DONE;
+		}
+	} else {
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+	}
+
+	switch (er) {
+	case EMULATE_DONE:
+		ret = RESUME_GUEST;
+		break;
+
+	case EMULATE_FAIL:
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		break;
+
+	case EMULATE_WAIT:
+		run->exit_reason = KVM_EXIT_INTR;
+		ret = RESUME_HOST;
+		break;
+
+	default:
+		BUG();
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
+		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		/*
+		 * XXXKYMA: The guest kernel does not expect to get this fault
+		 * when we are not using HIGHMEM. Need to address this in a
+		 * HIGHMEM kernel
+		 */
+		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	} else {
+		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
+		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+		kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
+		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		/*
+		 * All KSEG0 faults are handled by KVM, as the guest kernel does
+		 * not expect to ever get them
+		 */
+		if (kvm_mips_handle_kseg0_tlb_fault
+		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else {
+		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
+		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+			  vcpu->arch.pc, badvaddr);
+
+		/*
+		 * User Address (UA) fault. This could happen if:
+		 * (1) TLB entry not present/valid in both Guest and shadow host
+		 *     TLBs, in this case we pass on the fault to the guest
+		 *     kernel and let it handle it.
+		 * (2) TLB entry is present in the Guest TLB but not in the
+		 *     shadow, in this case we inject the TLB from the Guest TLB
+		 *     into the shadow host TLB
+		 */
+
+		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		if (kvm_mips_handle_kseg0_tlb_fault
+		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else {
+		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KVM_GUEST_KERNEL_MODE(vcpu)
+	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+		kvm_debug("Emulate Store to MMIO space\n");
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			kvm_err("Emulate Store to MMIO space failed\n");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
+	} else {
+		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			kvm_err("Emulate Load from MMIO space failed\n");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
+	} else {
+		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		er = EMULATE_FAIL;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE) {
+		ret = RESUME_GUEST;
+	} else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE) {
+		ret = RESUME_GUEST;
+	} else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE) {
+		ret = RESUME_GUEST;
+	} else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu:	Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+		/*
+		 * No MSA in guest, or FPU enabled and not in FR=1 mode:
+		 * deliver a guest Reserved Instruction exception
+		 */
+		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+		/* MSA disabled by guest: deliver a guest MSA Disabled exception */
+		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+	} else {
+		/* Restore MSA/FPU state */
+		kvm_own_msa(vcpu);
+		er = EMULATE_DONE;
+	}
+
+	switch (er) {
+	case EMULATE_DONE:
+		ret = RESUME_GUEST;
+		break;
+
+	case EMULATE_FAIL:
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		break;
+
+	default:
+		BUG();
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t config1;
+	int vcpu_id = vcpu->vcpu_id;
+
+	/*
+	 * Arch specific stuff: set up the config registers properly so that
+	 * the guest will come up as expected. For now we simulate a MIPS 24kc.
+	 */
+	kvm_write_c0_guest_prid(cop0, 0x00019300);
+	/* Have config1, Cacheable, noncoherent, write-back, write allocate */
+	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
+				  (0x1 << CP0C0_AR) |
+				  (MMU_TYPE_R4000 << CP0C0_MT));
+
+	/* Read the cache characteristics from the host Config1 Register */
+	config1 = (read_c0_config1() & ~0x7f);
+
+	/* Set up MMU size */
+	config1 &= ~(0x3f << 25);
+	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
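+	/*
+	 * Config1[30:25] holds (MMU size - 1), so the guest sees exactly
+	 * KVM_MIPS_GUEST_TLB_SIZE TLB entries.
+	 */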
+
+	/* We unset some bits that we aren't emulating */
+	config1 &=
+	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
+	kvm_write_c0_guest_config1(cop0, config1);
+
+	/* Have config3, no tertiary/secondary caches implemented */
+	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+	/* Have config4, UserLocal */
+	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+	/* Have config5 */
+	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+	/* No config6 */
+	kvm_write_c0_guest_config5(cop0, 0);
+
+	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+	/*
+	 * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
+	 */
+	kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
+	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+	return 0;
+}
+
+static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     s64 *v)
+{
+	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_COUNT:
+		*v = kvm_mips_read_count(vcpu);
+		break;
+	case KVM_REG_MIPS_COUNT_CTL:
+		*v = vcpu->arch.count_ctl;
+		break;
+	case KVM_REG_MIPS_COUNT_RESUME:
+		*v = ktime_to_ns(vcpu->arch.count_resume);
+		break;
+	case KVM_REG_MIPS_COUNT_HZ:
+		*v = vcpu->arch.count_hz;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     s64 v)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int ret = 0;
+	unsigned int cur, change;
+
+	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_COUNT:
+		kvm_mips_write_count(vcpu, v);
+		break;
+	case KVM_REG_MIPS_CP0_COMPARE:
+		kvm_mips_write_compare(vcpu, v, false);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		/*
+		 * If the timer is stopped or started (DC bit) it must look
+		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
+		 * A timer interrupt should not happen in between.
+		 */
+		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
+			if (v & CAUSEF_DC) {
+				/* disable timer first */
+				kvm_mips_count_disable_cause(vcpu);
+				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			} else {
+				/* enable timer last */
+				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+				kvm_mips_count_enable_cause(vcpu);
+			}
+		} else {
+			kvm_write_c0_guest_cause(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG:
+		/* read-only for now */
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG1:
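+		/*
+		 * Only bits covered by kvm_mips_config1_wrmask() may change;
+		 * the same masked-XOR update is used for Config3/4/5 below.
+		 */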
+		cur = kvm_read_c0_guest_config1(cop0);
+		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config1(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG2:
+		/* read-only for now */
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG3:
+		cur = kvm_read_c0_guest_config3(cop0);
+		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config3(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG4:
+		cur = kvm_read_c0_guest_config4(cop0);
+		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config4(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG5:
+		cur = kvm_read_c0_guest_config5(cop0);
+		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config5(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_COUNT_CTL:
+		ret = kvm_mips_set_count_ctl(vcpu, v);
+		break;
+	case KVM_REG_MIPS_COUNT_RESUME:
+		ret = kvm_mips_set_count_resume(vcpu, v);
+		break;
+	case KVM_REG_MIPS_COUNT_HZ:
+		ret = kvm_mips_set_count_hz(vcpu, v);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
+{
+	kvm_lose_fpu(vcpu);
+
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+	/* exit handlers */
+	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+	.handle_syscall = kvm_trap_emul_handle_syscall,
+	.handle_res_inst = kvm_trap_emul_handle_res_inst,
+	.handle_break = kvm_trap_emul_handle_break,
+	.handle_trap = kvm_trap_emul_handle_trap,
+	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+	.handle_fpe = kvm_trap_emul_handle_fpe,
+	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+
+	.vm_init = kvm_trap_emul_vm_init,
+	.vcpu_init = kvm_trap_emul_vcpu_init,
+	.vcpu_setup = kvm_trap_emul_vcpu_setup,
+	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+	.queue_timer_int = kvm_mips_queue_timer_int_cb,
+	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+	.queue_io_int = kvm_mips_queue_io_int_cb,
+	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+	.irq_deliver = kvm_mips_irq_deliver_cb,
+	.irq_clear = kvm_mips_irq_clear_cb,
+	.get_one_reg = kvm_trap_emul_get_one_reg,
+	.set_one_reg = kvm_trap_emul_set_one_reg,
+	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
+	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+	*install_callbacks = &kvm_trap_emul_callbacks;
+	return 0;
+}