File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
new file mode 100644
index 0000000..0344e57
--- /dev/null
+++ b/arch/mips/lib/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for MIPS-specific library files.
+#
+
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
+
+obj-y			+= iomap.o
+obj-$(CONFIG_PCI)	+= iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)	:= $(filter-out csum_partial.o, $(lib-y))
+
+obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
+obj-$(CONFIG_CPU_R3000)		+= r3k_dump_tlb.o
+obj-$(CONFIG_CPU_TX39XX)	+= r3k_dump_tlb.o
+
+# libgcc-style stuff needed in the kernel
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
new file mode 100644
index 0000000..927dc94
--- /dev/null
+++ b/arch/mips/lib/ashldi3.c
@@ -0,0 +1,29 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long notrace __ashldi3(long long u, word_type b)
+{
+	DWunion uu, w;
+	word_type bm;
+
+	if (b == 0)
+		return u;
+
+	uu.ll = u;
+	bm = 32 - b;
+
+	if (bm <= 0) {
+		w.s.low = 0;
+		w.s.high = (unsigned int) uu.s.low << -bm;
+	} else {
+		const unsigned int carries = (unsigned int) uu.s.low >> bm;
+
+		w.s.low = (unsigned int) uu.s.low << b;
+		w.s.high = ((unsigned int) uu.s.high << b) | carries;
+	}
+
+	return w.ll;
+}
+
+EXPORT_SYMBOL(__ashldi3);
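
Note: __ashldi3 above synthesizes a 64-bit left shift out of 32-bit operations by
splitting the operand through DWunion. A minimal userspace sketch of the same
split, using stdint types instead of DWunion (illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Mirror of the kernel's split: shift the low/high 32-bit halves and carry
 * the bits that cross the word boundary, as __ashldi3 does via DWunion. */
static long long shift_left64(long long u, int b)
{
	uint32_t lo = (uint32_t)u;
	uint32_t hi = (uint32_t)((uint64_t)u >> 32);
	uint32_t out_lo, out_hi;

	if (b == 0)
		return u;
	if (b >= 32) {			/* kernel: bm = 32 - b <= 0 */
		out_lo = 0;
		out_hi = lo << (b - 32);
	} else {
		out_lo = lo << b;
		out_hi = (hi << b) | (lo >> (32 - b));
	}
	return (long long)(((uint64_t)out_hi << 32) | out_lo);
}

int main(void)
{
	assert(shift_left64(0x1122334455667788LL, 8) == 0x2233445566778800LL);
	assert(shift_left64(1LL, 40) == 0x10000000000LL);
	return 0;
}
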
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
new file mode 100644
index 0000000..9fdf1a5
--- /dev/null
+++ b/arch/mips/lib/ashrdi3.c
@@ -0,0 +1,31 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long notrace __ashrdi3(long long u, word_type b)
+{
+	DWunion uu, w;
+	word_type bm;
+
+	if (b == 0)
+		return u;
+
+	uu.ll = u;
+	bm = 32 - b;
+
+	if (bm <= 0) {
+		/* w.s.high = 1..1 or 0..0 */
+		w.s.high =
+		    uu.s.high >> 31;
+		w.s.low = uu.s.high >> -bm;
+	} else {
+		const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+		w.s.high = uu.s.high >> b;
+		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+	}
+
+	return w.ll;
+}
+
+EXPORT_SYMBOL(__ashrdi3);
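
Note: __ashrdi3 differs from __lshrdi3 (later in this patch) only in how the
vacated high bits are filled: copies of the sign bit here, zeros there. A quick
userspace check of that distinction (illustrative only):

#include <assert.h>

int main(void)
{
	long long v = -0x100000000LL;		/* 0xffffffff00000000 */

	/* arithmetic shift: high word filled with the sign bit */
	assert((v >> 4) == -0x10000000LL);
	/* logical shift: high word filled with zeros */
	assert(((unsigned long long)v >> 4) == 0x0ffffffff0000000ULL);
	return 0;
}
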
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 0000000..3b2a1e7
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.	This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	int res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a) != 0;
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	int res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a) != 0;
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	int res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a) != 0;
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.	This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long *a = (unsigned long *)addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	int res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a) != 0;
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
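
Note: each fall-back helper above first turns a bit number into a word offset
and an in-word mask via SZLONG_LOG/SZLONG_MASK, which come from the MIPS bitops
headers rather than this patch. A userspace sketch of that decomposition,
assuming a 32-bit build where the constants are 5 and 31:

#include <stdio.h>

#define SZLONG_LOG	5	/* assumed 32-bit values, see asm headers */
#define SZLONG_MASK	31UL

int main(void)
{
	unsigned long nr = 100;				/* bit 100 of a bitmap */
	unsigned long word = nr >> SZLONG_LOG;		/* 100 / 32 = word 3   */
	unsigned long mask = 1UL << (nr & SZLONG_MASK);	/* bit 4 within word 3 */

	printf("word %lu, mask 0x%08lx\n", word, mask);	/* word 3, mask 0x10 */
	return 0;
}
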
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
new file mode 100644
index 0000000..e3e77aa
--- /dev/null
+++ b/arch/mips/lib/bswapdi.c
@@ -0,0 +1,15 @@
+#include <linux/module.h>
+
+unsigned long long notrace __bswapdi2(unsigned long long u)
+{
+	return (((u) & 0xff00000000000000ull) >> 56) |
+	       (((u) & 0x00ff000000000000ull) >> 40) |
+	       (((u) & 0x0000ff0000000000ull) >> 24) |
+	       (((u) & 0x000000ff00000000ull) >>  8) |
+	       (((u) & 0x00000000ff000000ull) <<  8) |
+	       (((u) & 0x0000000000ff0000ull) << 24) |
+	       (((u) & 0x000000000000ff00ull) << 40) |
+	       (((u) & 0x00000000000000ffull) << 56);
+}
+
+EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
new file mode 100644
index 0000000..530a8af
--- /dev/null
+++ b/arch/mips/lib/bswapsi.c
@@ -0,0 +1,11 @@
+#include <linux/module.h>
+
+unsigned int notrace __bswapsi2(unsigned int u)
+{
+	return (((u) & 0xff000000) >> 24) |
+	       (((u) & 0x00ff0000) >>  8) |
+	       (((u) & 0x0000ff00) <<  8) |
+	       (((u) & 0x000000ff) << 24);
+}
+
+EXPORT_SYMBOL(__bswapsi2);
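
Note: __bswapsi2/__bswapdi2 are the libgcc byte-swap fall-backs. A userspace
restatement of the 32-bit case with its expected result (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t u)
{
	return ((u & 0xff000000) >> 24) | ((u & 0x00ff0000) >> 8) |
	       ((u & 0x0000ff00) <<  8) | ((u & 0x000000ff) << 24);
}

int main(void)
{
	assert(bswap32(0x12345678u) == 0x78563412u);
	return 0;
}
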
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
new file mode 100644
index 0000000..06857da
--- /dev/null
+++ b/arch/mips/lib/cmpdi2.c
@@ -0,0 +1,27 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+word_type notrace __cmpdi2(long long a, long long b)
+{
+	const DWunion au = {
+		.ll = a
+	};
+	const DWunion bu = {
+		.ll = b
+	};
+
+	if (au.s.high < bu.s.high)
+		return 0;
+	else if (au.s.high > bu.s.high)
+		return 2;
+
+	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+		return 0;
+	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+		return 2;
+
+	return 1;
+}
+
+EXPORT_SYMBOL(__cmpdi2);
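
Note: __cmpdi2 uses the libgcc return convention of 0 for less-than, 1 for
equal and 2 for greater-than, rather than a signed -1/0/1. A userspace
restatement of that convention (DWunion split elided, illustrative only):

#include <assert.h>

static int cmpdi2(long long a, long long b)
{
	if (a < b)
		return 0;
	if (a > b)
		return 2;
	return 1;
}

int main(void)
{
	assert(cmpdi2(-1, 1) == 0);
	assert(cmpdi2(5, 5) == 1);
	assert(cmpdi2(1LL << 40, 0) == 2);
	return 0;
}
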
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 0000000..ed88647
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,841 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+/*
+ * As we are sharing the code base with the mips32 tree (which uses the o32
+ * ABI register definitions), we need to redefine the register definitions
+ * from the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0	$8
+#define t1	$9
+#define t2	$10
+#define t3	$11
+#define t4	$12
+#define t5	$13
+#define t6	$14
+#define t7	$15
+
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD   ld
+#define LOAD32 lwu
+#define ADD    daddu
+#define NBYTES 8
+
+#else
+
+#define LOAD   lw
+#define LOAD32 lw
+#define ADD    addu
+#define NBYTES 4
+
+#endif /* USE_DOUBLE */
+
+#define UNIT(unit)  ((unit)*NBYTES)
+
+#define ADDC(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
+	ADD	sum, reg;					\
+	sltu	v1, sum, reg;					\
+	ADD	sum, v1;					\
+	.set	pop
+
+#define ADDC32(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
+	addu	sum, reg;					\
+	sltu	v1, sum, reg;					\
+	addu	sum, v1;					\
+	.set	pop
+
+#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
+	LOAD	_t0, (offset + UNIT(0))(src);			\
+	LOAD	_t1, (offset + UNIT(1))(src);			\
+	LOAD	_t2, (offset + UNIT(2))(src);			\
+	LOAD	_t3, (offset + UNIT(3))(src);			\
+	ADDC(_t0, _t1);						\
+	ADDC(_t2, _t3);						\
+	ADDC(sum, _t0);						\
+	ADDC(sum, _t2)
+
+#ifdef USE_DOUBLE
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
+	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
+#else
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
+	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
+	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
+#endif
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+	.text
+	.set	noreorder
+	.align	5
+LEAF(csum_partial)
+	move	sum, zero
+	move	t7, zero
+
+	sltiu	t8, a1, 0x8
+	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
+	 move	t2, a1
+
+	andi	t7, src, 0x1			/* odd buffer? */
+
+.Lhword_align:
+	beqz	t7, .Lword_align
+	 andi	t8, src, 0x2
+
+	lbu	t0, (src)
+	LONG_SUBU	a1, a1, 0x1
+#ifdef __MIPSEL__
+	sll	t0, t0, 8
+#endif
+	ADDC(sum, t0)
+	PTR_ADDU	src, src, 0x1
+	andi	t8, src, 0x2
+
+.Lword_align:
+	beqz	t8, .Ldword_align
+	 sltiu	t8, a1, 56
+
+	lhu	t0, (src)
+	LONG_SUBU	a1, a1, 0x2
+	ADDC(sum, t0)
+	sltiu	t8, a1, 56
+	PTR_ADDU	src, src, 0x2
+
+.Ldword_align:
+	bnez	t8, .Ldo_end_words
+	 move	t8, a1
+
+	andi	t8, src, 0x4
+	beqz	t8, .Lqword_align
+	 andi	t8, src, 0x8
+
+	LOAD32	t0, 0x00(src)
+	LONG_SUBU	a1, a1, 0x4
+	ADDC(sum, t0)
+	PTR_ADDU	src, src, 0x4
+	andi	t8, src, 0x8
+
+.Lqword_align:
+	beqz	t8, .Loword_align
+	 andi	t8, src, 0x10
+
+#ifdef USE_DOUBLE
+	ld	t0, 0x00(src)
+	LONG_SUBU	a1, a1, 0x8
+	ADDC(sum, t0)
+#else
+	lw	t0, 0x00(src)
+	lw	t1, 0x04(src)
+	LONG_SUBU	a1, a1, 0x8
+	ADDC(sum, t0)
+	ADDC(sum, t1)
+#endif
+	PTR_ADDU	src, src, 0x8
+	andi	t8, src, 0x10
+
+.Loword_align:
+	beqz	t8, .Lbegin_movement
+	 LONG_SRL	t8, a1, 0x7
+
+#ifdef USE_DOUBLE
+	ld	t0, 0x00(src)
+	ld	t1, 0x08(src)
+	ADDC(sum, t0)
+	ADDC(sum, t1)
+#else
+	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
+#endif
+	LONG_SUBU	a1, a1, 0x10
+	PTR_ADDU	src, src, 0x10
+	LONG_SRL	t8, a1, 0x7
+
+.Lbegin_movement:
+	beqz	t8, 1f
+	 andi	t2, a1, 0x40
+
+.Lmove_128bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
+	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
+	LONG_SUBU	t8, t8, 0x01
+	.set	reorder				/* DADDI_WAR */
+	PTR_ADDU	src, src, 0x80
+	bnez	t8, .Lmove_128bytes
+	.set	noreorder
+
+1:
+	beqz	t2, 1f
+	 andi	t2, a1, 0x20
+
+.Lmove_64bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+	PTR_ADDU	src, src, 0x40
+
+1:
+	beqz	t2, .Ldo_end_words
+	 andi	t8, a1, 0x1c
+
+.Lmove_32bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+	andi	t8, a1, 0x1c
+	PTR_ADDU	src, src, 0x20
+
+.Ldo_end_words:
+	beqz	t8, .Lsmall_csumcpy
+	 andi	t2, a1, 0x3
+	LONG_SRL	t8, t8, 0x2
+
+.Lend_words:
+	LOAD32	t0, (src)
+	LONG_SUBU	t8, t8, 0x1
+	ADDC(sum, t0)
+	.set	reorder				/* DADDI_WAR */
+	PTR_ADDU	src, src, 0x4
+	bnez	t8, .Lend_words
+	.set	noreorder
+
+/* unknown src alignment and < 8 bytes to go  */
+.Lsmall_csumcpy:
+	move	a1, t2
+
+	andi	t0, a1, 4
+	beqz	t0, 1f
+	 andi	t0, a1, 2
+
+	/* Still a full word to go  */
+	ulw	t1, (src)
+	PTR_ADDIU	src, 4
+#ifdef USE_DOUBLE
+	dsll	t1, t1, 32			/* clear lower 32bit */
+#endif
+	ADDC(sum, t1)
+
+1:	move	t1, zero
+	beqz	t0, 1f
+	 andi	t0, a1, 1
+
+	/* Still a halfword to go  */
+	ulhu	t1, (src)
+	PTR_ADDIU	src, 2
+
+1:	beqz	t0, 1f
+	 sll	t1, t1, 16
+
+	lbu	t2, (src)
+	 nop
+
+#ifdef __MIPSEB__
+	sll	t2, t2, 8
+#endif
+	or	t1, t2
+
+1:	ADDC(sum, t1)
+
+	/* fold checksum */
+#ifdef USE_DOUBLE
+	dsll32	v1, sum, 0
+	daddu	sum, v1
+	sltu	v1, sum, v1
+	dsra32	sum, sum, 0
+	addu	sum, v1
+#endif
+
+	/* odd buffer alignment? */
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+	.set	push
+	.set	arch=mips32r2
+	wsbh	v1, sum
+	movn	sum, v1, t7
+	.set	pop
+#else
+	beqz	t7, 1f			/* odd buffer alignment? */
+	 lui	v1, 0x00ff
+	addu	v1, 0x00ff
+	and	t0, sum, v1
+	sll	t0, t0, 8
+	srl	sum, sum, 8
+	and	sum, sum, v1
+	or	sum, sum, t0
+1:
+#endif
+	.set	reorder
+	/* Add the passed partial csum.	 */
+	ADDC32(sum, a2)
+	jr	ra
+	.set	noreorder
+	END(csum_partial)
+
+
+/*
+ * checksum and copy routines based on memcpy.S
+ *
+ *	csum_partial_copy_nocheck(src, dst, len, sum)
+ *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
+ *
+ * See "Spec" in memcpy.S for details.	Unlike __copy_user, all
+ * functions in this file use the standard calling convention.
+ */
+
+#define src a0
+#define dst a1
+#define len a2
+#define psum a3
+#define sum v0
+#define odd t8
+#define errptr t9
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by __csum_partial_copy_from_user and maintained by
+ *	not writing AT in __csum_partial_copy
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores store -EFAULT to errptr and return.
+ * These handlers do not need to overwrite any data.
+ */
+
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr, handler)	\
+	.if \mode == LEGACY_MODE;		\
+9:		insn reg, addr;			\
+		.section __ex_table,"a";	\
+		PTR	9b, handler;		\
+		.previous;			\
+	/* This is enabled in EVA mode */	\
+	.else;					\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";		\
+			PTR	9b, handler;			\
+			.previous;				\
+		.else;						\
+			/* EVA without exception */		\
+			insn reg, addr;				\
+		.endif;						\
+	.endif
+
+#undef LOAD
+
+#ifdef USE_DOUBLE
+
+#define LOADK	ld /* No exception */
+#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
+#define ADD    daddu
+#define SUB    dsubu
+#define SRL    dsrl
+#define SLL    dsll
+#define SLLV   dsllv
+#define SRLV   dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+#else
+
+#define LOADK	lw /* No exception */
+#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
+#define ADD    addu
+#define SUB    subu
+#define SRL    srl
+#define SLL    sll
+#define SLLV   sllv
+#define SRLV   srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST	LOADL
+#define STFIRST STORER
+#define STREST	STOREL
+#define SHIFT_DISCARD SLLV
+#define SHIFT_DISCARD_REVERT SRLV
+#else
+#define LDFIRST LOADL
+#define LDREST	LOADR
+#define STFIRST STOREL
+#define STREST	STORER
+#define SHIFT_DISCARD SRLV
+#define SHIFT_DISCARD_REVERT SLLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+
+#define ADDRMASK (NBYTES-1)
+
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	.set	noat
+#else
+	.set	at=v1
+#endif
+
+	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+
+	PTR_ADDU	AT, src, len	/* See (1) above. */
+	/* initialize __nocheck if this is the first time we execute this
+	 * macro
+	 */
+#ifdef CONFIG_64BIT
+	move	errptr, a4
+#else
+	lw	errptr, 16(sp)
+#endif
+	.if \__nocheck == 1
+	FEXPORT(csum_partial_copy_nocheck)
+	.endif
+	move	sum, zero
+	move	odd, zero
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+	/*
+	 * The "issue break"s below are very approximate.
+	 * Issue delays for dcache fills will perturb the schedule, as will
+	 * load queue full replay traps, etc.
+	 *
+	 * If len < NBYTES use byte operations.
+	 */
+	sltu	t2, len, NBYTES
+	and	t1, dst, ADDRMASK
+	bnez	t2, .Lcopy_bytes_checklen\@
+	 and	t0, src, ADDRMASK
+	andi	odd, dst, 0x1			/* odd buffer? */
+	bnez	t1, .Ldst_unaligned\@
+	 nop
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+	/*
+	 * use delay slot for fall-through
+	 * src and dst are aligned; need to compute rem
+	 */
+.Lboth_aligned\@:
+	 SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
+	beqz	t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
+	 nop
+	SUB	len, 8*NBYTES		# subtract here for bgez loop
+	.align	4
+1:
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
+	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
+	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+	SUB	len, len, 8*NBYTES
+	ADD	src, src, 8*NBYTES
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	ADDC(sum, t0)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	ADDC(sum, t2)
+	STORE(t4, UNIT(4)(dst),	.Ls_exc\@)
+	ADDC(t4, t5)
+	STORE(t5, UNIT(5)(dst),	.Ls_exc\@)
+	ADDC(sum, t4)
+	STORE(t6, UNIT(6)(dst),	.Ls_exc\@)
+	ADDC(t6, t7)
+	STORE(t7, UNIT(7)(dst),	.Ls_exc\@)
+	ADDC(sum, t6)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 8*NBYTES
+	bgez	len, 1b
+	.set	noreorder
+	ADD	len, 8*NBYTES		# revert len (see above)
+
+	/*
+	 * len == the number of bytes left to copy < 8*NBYTES
+	 */
+.Lcleanup_both_aligned\@:
+#define rem t7
+	beqz	len, .Ldone\@
+	 sltu	t0, len, 4*NBYTES
+	bnez	t0, .Lless_than_4units\@
+	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
+	/*
+	 * len >= 4*NBYTES
+	 */
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	SUB	len, len, 4*NBYTES
+	ADD	src, src, 4*NBYTES
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	ADDC(sum, t0)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	ADDC(sum, t2)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
+	beqz	len, .Ldone\@
+	.set	noreorder
+.Lless_than_4units\@:
+	/*
+	 * rem = len % NBYTES
+	 */
+	beq	rem, len, .Lcopy_bytes\@
+	 nop
+1:
+	LOAD(t0, 0(src), .Ll_exc\@)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+	STORE(t0, 0(dst), .Ls_exc\@)
+	ADDC(sum, t0)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
+	bne	rem, len, 1b
+	.set	noreorder
+
+	/*
+	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+	 * A loop would do only a byte at a time with possible branch
+	 * mispredicts.	 Can't do an explicit LOAD dst,mask,or,STORE
+	 * because can't assume read-access to dst.  Instead, use
+	 * STREST dst, which doesn't require read access to dst.
+	 *
+	 * This code should perform better than a simple loop on modern,
+	 * wide-issue mips processors because the code has fewer branches and
+	 * more instruction-level parallelism.
+	 */
+#define bits t2
+	beqz	len, .Ldone\@
+	 ADD	t1, dst, len	# t1 is just past last byte of dst
+	li	bits, 8*NBYTES
+	SLL	rem, len, 3	# rem = number of bits to keep
+	LOAD(t0, 0(src), .Ll_exc\@)
+	SUB	bits, bits, rem # bits = number of bits to discard
+	SHIFT_DISCARD t0, t0, bits
+	STREST(t0, -1(t1), .Ls_exc\@)
+	SHIFT_DISCARD_REVERT t0, t0, bits
+	.set reorder
+	ADDC(sum, t0)
+	b	.Ldone\@
+	.set noreorder
+.Ldst_unaligned\@:
+	/*
+	 * dst is unaligned
+	 * t0 = src & ADDRMASK
+	 * t1 = dst & ADDRMASK; T1 > 0
+	 * len >= NBYTES
+	 *
+	 * Copy enough bytes to align dst
+	 * Set match = (src and dst have same alignment)
+	 */
+#define match rem
+	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+	ADD	t2, zero, NBYTES
+	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+	SUB	t2, t2, t1	# t2 = number of bytes copied
+	xor	match, t0, t1
+	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	SLL	t4, t1, 3		# t4 = number of bits to discard
+	SHIFT_DISCARD t3, t3, t4
+	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
+	ADDC(sum, t3)
+	beq	len, t2, .Ldone\@
+	 SUB	len, len, t2
+	ADD	dst, dst, t2
+	beqz	match, .Lboth_aligned\@
+	 ADD	src, src, t2
+
+.Lsrc_unaligned_dst_aligned\@:
+	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
+	beqz	t0, .Lcleanup_src_unaligned\@
+	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+	SUB	len, len, 4*NBYTES
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	ADD	src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+	nop				# improves slotting
+#endif
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	ADDC(sum, t0)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	ADDC(sum, t2)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
+	bne	len, rem, 1b
+	.set	noreorder
+
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
+	 and	rem, len, NBYTES-1  # rem = len % NBYTES
+	beq	rem, len, .Lcopy_bytes\@
+	 nop
+1:
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+	STORE(t0, 0(dst), .Ls_exc\@)
+	ADDC(sum, t0)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
+	bne	len, rem, 1b
+	.set	noreorder
+
+.Lcopy_bytes_checklen\@:
+	beqz	len, .Ldone\@
+	 nop
+.Lcopy_bytes\@:
+	/* 0 < len < NBYTES  */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define SHIFT_START 0
+#define SHIFT_INC 8
+#else
+#define SHIFT_START 8*(NBYTES-1)
+#define SHIFT_INC -8
+#endif
+	move	t2, zero	# partial word
+	li	t3, SHIFT_START # shift
+/* use .Ll_exc_copy here to return correct sum on fault */
+#define COPY_BYTE(N)			\
+	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
+	SUB	len, len, 1;		\
+	STOREB(t0, N(dst), .Ls_exc\@);	\
+	SLLV	t0, t0, t3;		\
+	addu	t3, SHIFT_INC;		\
+	beqz	len, .Lcopy_bytes_done\@; \
+	 or	t2, t0
+
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+#ifdef USE_DOUBLE
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+#endif
+	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+	SUB	len, len, 1
+	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+	SLLV	t0, t0, t3
+	or	t2, t0
+.Lcopy_bytes_done\@:
+	ADDC(sum, t2)
+.Ldone\@:
+	/* fold checksum */
+	.set	push
+	.set	noat
+#ifdef USE_DOUBLE
+	dsll32	v1, sum, 0
+	daddu	sum, v1
+	sltu	v1, sum, v1
+	dsra32	sum, sum, 0
+	addu	sum, v1
+#endif
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+	.set	push
+	.set	arch=mips32r2
+	wsbh	v1, sum
+	movn	sum, v1, odd
+	.set	pop
+#else
+	beqz	odd, 1f			/* odd buffer alignment? */
+	 lui	v1, 0x00ff
+	addu	v1, 0x00ff
+	and	t0, sum, v1
+	sll	t0, t0, 8
+	srl	sum, sum, 8
+	and	sum, sum, v1
+	or	sum, sum, t0
+1:
+#endif
+	.set	pop
+	.set reorder
+	ADDC32(sum, psum)
+	jr	ra
+	.set noreorder
+
+.Ll_exc_copy\@:
+	/*
+	 * Copy bytes from src until faulting load address (or until a
+	 * lb faults)
+	 *
+	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+	 * may be more than a byte beyond the last address.
+	 * Hence, the lb below may get an exception.
+	 *
+	 * Assumes src < THREAD_BUADDR($28)
+	 */
+	LOADK	t0, TI_TASK($28)
+	 li	t2, SHIFT_START
+	LOADK	t0, THREAD_BUADDR(t0)
+1:
+	LOADBU(t1, 0(src), .Ll_exc\@)
+	ADD	src, src, 1
+	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	SLLV	t1, t1, t2
+	addu	t2, SHIFT_INC
+	ADDC(sum, t1)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 1
+	bne	src, t0, 1b
+	.set	noreorder
+.Ll_exc\@:
+	LOADK	t0, TI_TASK($28)
+	 nop
+	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	 nop
+	SUB	len, AT, t0		# len number of uncopied bytes
+	/*
+	 * Here's where we rely on src and dst being incremented in tandem,
+	 *   See (3) above.
+	 * dst += (fault addr - src) to put dst at first byte to clear
+	 */
+	ADD	dst, t0			# compute start address in a1
+	SUB	dst, src
+	/*
+	 * Clear len bytes starting at dst.  Can't call __bzero because it
+	 * might modify len.  An inefficient loop for these rare times...
+	 */
+	.set	reorder				/* DADDI_WAR */
+	SUB	src, len, 1
+	beqz	len, .Ldone\@
+	.set	noreorder
+1:	sb	zero, 0(dst)
+	ADD	dst, dst, 1
+	.set	push
+	.set	noat
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	bnez	src, 1b
+	 SUB	src, src, 1
+#else
+	li	v1, 1
+	bnez	src, 1b
+	 SUB	src, src, v1
+#endif
+	li	v1, -EFAULT
+	b	.Ldone\@
+	 sw	v1, (errptr)
+
+.Ls_exc\@:
+	li	v0, -1 /* invalid checksum */
+	li	v1, -EFAULT
+	jr	ra
+	 sw	v1, (errptr)
+	.set	pop
+	.endm
+
+LEAF(__csum_partial_copy_kernel)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+#endif
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
+END(__csum_partial_copy_kernel)
+
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+END(__csum_partial_copy_to_user)
+
+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+END(__csum_partial_copy_from_user)
+#endif
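
Note: the ADDC macro and the dsll32/dsra32 sequence above implement the usual
end-around-carry arithmetic of the Internet checksum. A small C model of the
carry-propagating add and of folding the 32-bit accumulator down to 16 bits
(illustrative only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* Model of the ADDC macro: add, then fold the carry back into the sum. */
static uint32_t csum_addc(uint32_t sum, uint32_t word)
{
	sum += word;
	if (sum < word)			/* carry out of bit 31 */
		sum += 1;
	return sum;
}

/* Reduce the 32-bit accumulator to the final 16-bit ones' complement sum. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t s = csum_addc(0xffffffffu, 2);		/* wraps; carry folded */

	printf("sum=0x%08x folded=0x%04x\n", s, (unsigned)csum_fold32(s));
	return 0;
}
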
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
new file mode 100644
index 0000000..21d27c6
--- /dev/null
+++ b/arch/mips/lib/delay.c
@@ -0,0 +1,64 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Waldorf Electronics
+ * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
+ */
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/smp.h>
+#include <linux/stringify.h>
+
+#include <asm/asm.h>
+#include <asm/compiler.h>
+#include <asm/war.h>
+
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
+void __delay(unsigned long loops)
+{
+	__asm__ __volatile__ (
+	"	.set	noreorder				\n"
+	"	.align	3					\n"
+	"1:	bnez	%0, 1b					\n"
+	"	 " __stringify(LONG_SUBU) "	%0, %1		\n"
+	"	.set	reorder					\n"
+	: "=r" (loops)
+	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * Division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec).	Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant)
+ */
+
+void __udelay(unsigned long us)
+{
+	unsigned int lpj = raw_current_cpu_data.udelay_val;
+
+	__delay((us * 0x000010c7ull * HZ * lpj) >> 32);
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+	unsigned int lpj = raw_current_cpu_data.udelay_val;
+
+	__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
+}
+EXPORT_SYMBOL(__ndelay);
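
Note: the magic constants in __udelay/__ndelay are fixed-point reciprocals,
0x10c7 ~= 2^32/10^6 and 0x5 ~= 2^32/10^9, so the final ">> 32" turns the
multiply into a divide by 10^6 (or 10^9). A worked check with made-up HZ and
lpj values (illustrative only, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long us = 100, hz = 250, lpj = 1000000;	/* example values */
	unsigned long long fixp  = (us * 0x10c7ULL * hz * lpj) >> 32;
	unsigned long long exact = us * hz * lpj / 1000000;

	/* both print 25000 loops for a 100 us delay */
	printf("fixed-point: %llu loops, exact: %llu loops\n", fixp, exact);
	return 0;
}
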
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
new file mode 100644
index 0000000..92a3731
--- /dev/null
+++ b/arch/mips/lib/dump_tlb.c
@@ -0,0 +1,172 @@
+/*
+ * Dump R4x00 TLB for debugging purposes.
+ *
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbdebug.h>
+
+void dump_tlb_regs(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
+	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+	pr_info("Wired    : %0x\n", read_c0_wired());
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+	case CPU_R16000:
+		pr_info("FrameMask: %0x\n", read_c0_framemask());
+		break;
+	}
+	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+	if (cpu_has_htw) {
+		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
+		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
+		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
+	}
+}
+
+static inline const char *msk2str(unsigned int mask)
+{
+	switch (mask) {
+	case PM_4K:	return "4kb";
+	case PM_16K:	return "16kb";
+	case PM_64K:	return "64kb";
+	case PM_256K:	return "256kb";
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case PM_8K:	return "8kb";
+	case PM_32K:	return "32kb";
+	case PM_128K:	return "128kb";
+	case PM_512K:	return "512kb";
+	case PM_2M:	return "2Mb";
+	case PM_8M:	return "8Mb";
+	case PM_32M:	return "32Mb";
+#endif
+#ifndef CONFIG_CPU_VR41XX
+	case PM_1M:	return "1Mb";
+	case PM_4M:	return "4Mb";
+	case PM_16M:	return "16Mb";
+	case PM_64M:	return "64Mb";
+	case PM_256M:	return "256Mb";
+	case PM_1G:	return "1Gb";
+#endif
+	}
+	return "";
+}
+
+static void dump_tlb(int first, int last)
+{
+	unsigned long s_entryhi, entryhi, asid;
+	unsigned long long entrylo0, entrylo1, pa;
+	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+#ifdef CONFIG_32BIT
+	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
+	int pwidth = xpa ? 11 : 8;
+	int vwidth = 8;
+#else
+	bool xpa = false;
+	int pwidth = 11;
+	int vwidth = 11;
+#endif
+
+	s_pagemask = read_c0_pagemask();
+	s_entryhi = read_c0_entryhi();
+	s_index = read_c0_index();
+	asid = s_entryhi & 0xff;
+
+	for (i = first; i <= last; i++) {
+		write_c0_index(i);
+		mtc0_tlbr_hazard();
+		tlb_read();
+		tlb_read_hazard();
+		pagemask = read_c0_pagemask();
+		entryhi	 = read_c0_entryhi();
+		entrylo0 = read_c0_entrylo0();
+		entrylo1 = read_c0_entrylo1();
+
+		/* EHINV bit marks entire entry as invalid */
+		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
+			continue;
+		/*
+		 * Prior to tlbinv, unused entries have a virtual address of
+		 * CKSEG0.
+		 */
+		if ((entryhi & ~0x1ffffUL) == CKSEG0)
+			continue;
+		/*
+		 * ASID takes effect in absence of G (global) bit.
+		 * We check both G bits, even though architecturally they should
+		 * match one another, because some revisions of the SB1 core may
+		 * leave only a single G bit set after a machine check exception
+		 * due to duplicate TLB entry.
+		 */
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
+		    (entryhi & 0xff) != asid)
+			continue;
+
+		/*
+		 * Only print entries in use
+		 */
+		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
+
+		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+
+		printk("va=%0*lx asid=%02lx\n",
+		       vwidth, (entryhi & ~0x1fffUL),
+		       entryhi & 0xff);
+		/* RI/XI are in awkward places, so mask them off separately */
+		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+		if (xpa)
+			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
+		pa = (pa << 6) & PAGE_MASK;
+		printk("\t[");
+		if (cpu_has_rixi)
+			printk("ri=%d xi=%d ",
+			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+		       pwidth, pa, c0,
+		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
+		/* RI/XI are in awkward places, so mask them off separately */
+		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+		if (xpa)
+			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
+		pa = (pa << 6) & PAGE_MASK;
+		if (cpu_has_rixi)
+			printk("ri=%d xi=%d ",
+			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+		       pwidth, pa, c1,
+		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
+	}
+	printk("\n");
+
+	write_c0_entryhi(s_entryhi);
+	write_c0_index(s_index);
+	write_c0_pagemask(s_pagemask);
+}
+
+void dump_tlb_all(void)
+{
+	dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
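
Note: the "pa = (pa << 6) & PAGE_MASK" step above recovers the physical address
from the EntryLo PFN field, which sits above the G/V/D/C flag bits. A userspace
sketch of that arithmetic, assuming 4 KiB pages and the usual EntryLo layout
with the PFN starting at bit 6 (an assumption stated here, not spelled out in
the patch):

#include <stdio.h>

#define PAGE_MASK	(~0xfffULL)	/* assuming 4 KiB pages */

int main(void)
{
	unsigned long long entrylo = 0x0000000000400019ULL;	/* PFN 0x10000 + flags */
	unsigned long long pa = (entrylo << 6) & PAGE_MASK;

	printf("pa = 0x%llx\n", pa);	/* 0x10000000 == PFN 0x10000 << 12 */
	return 0;
}
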
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
new file mode 100644
index 0000000..fd35daa
--- /dev/null
+++ b/arch/mips/lib/iomap-pci.c
@@ -0,0 +1,48 @@
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
+ * (C) Copyright 2007 MIPS Technologies, Inc.
+ *     written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+void __iomem *__pci_ioport_map(struct pci_dev *dev,
+			       unsigned long port, unsigned int nr)
+{
+	struct pci_controller *ctrl = dev->bus->sysdata;
+	unsigned long base = ctrl->io_map_base;
+
+	/* This will eventually become a BUG_ON but for now be gentle */
+	if (unlikely(!ctrl->io_map_base)) {
+		struct pci_bus *bus = dev->bus;
+		char name[8];
+
+		while (bus->parent)
+			bus = bus->parent;
+
+		ctrl->io_map_base = base = mips_io_port_base;
+
+		sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
+		printk(KERN_WARNING "io_map_base of root PCI bus %s unset.  "
+		       "Trying to continue but you better\nfix this issue or "
+		       "report it to linux-mips@linux-mips.org or your "
+		       "vendor.\n", name);
+#ifdef CONFIG_PCI_DOMAINS
+		panic("To avoid data corruption io_map_base MUST be set with "
+		      "multiple PCI domains.");
+#endif
+	}
+
+	return (void __iomem *) (ctrl->io_map_base + port);
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	iounmap(addr);
+}
+
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
new file mode 100644
index 0000000..8e7e378
--- /dev/null
+++ b/arch/mips/lib/iomap.c
@@ -0,0 +1,226 @@
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
+ * (C) Copyright 2007 MIPS Technologies, Inc.
+ *     written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <asm/io.h>
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or an MMIO access; these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines don't assume any hardware mappings, and just
+ * encode the PIO/MMIO as part of the cookie. They coldly assume that
+ * the MMIO IO mappings are not in the low address range.
+ *
+ * Architectures for which this is not true can't use this generic
+ * implementation and should do their own copy.
+ */
+
+#define PIO_MASK	0x0ffffUL
+
+unsigned int ioread8(void __iomem *addr)
+{
+	return readb(addr);
+}
+
+EXPORT_SYMBOL(ioread8);
+
+unsigned int ioread16(void __iomem *addr)
+{
+	return readw(addr);
+}
+
+EXPORT_SYMBOL(ioread16);
+
+unsigned int ioread16be(void __iomem *addr)
+{
+	return be16_to_cpu(__raw_readw(addr));
+}
+
+EXPORT_SYMBOL(ioread16be);
+
+unsigned int ioread32(void __iomem *addr)
+{
+	return readl(addr);
+}
+
+EXPORT_SYMBOL(ioread32);
+
+unsigned int ioread32be(void __iomem *addr)
+{
+	return be32_to_cpu(__raw_readl(addr));
+}
+
+EXPORT_SYMBOL(ioread32be);
+
+void iowrite8(u8 val, void __iomem *addr)
+{
+	writeb(val, addr);
+}
+
+EXPORT_SYMBOL(iowrite8);
+
+void iowrite16(u16 val, void __iomem *addr)
+{
+	writew(val, addr);
+}
+
+EXPORT_SYMBOL(iowrite16);
+
+void iowrite16be(u16 val, void __iomem *addr)
+{
+	__raw_writew(cpu_to_be16(val), addr);
+}
+
+EXPORT_SYMBOL(iowrite16be);
+
+void iowrite32(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+
+EXPORT_SYMBOL(iowrite32);
+
+void iowrite32be(u32 val, void __iomem *addr)
+{
+	__raw_writel(cpu_to_be32(val), addr);
+}
+
+EXPORT_SYMBOL(iowrite32be);
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__mem" accesses, since we want to convert
+ * to CPU byte order if the host bus happens to not match the
+ * endianness of PCI/ISA (see mach-generic/mangle-port.h).
+ */
+static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+{
+	while (--count >= 0) {
+		u8 data = __mem_readb(addr);
+		*dst = data;
+		dst++;
+	}
+}
+
+static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+{
+	while (--count >= 0) {
+		u16 data = __mem_readw(addr);
+		*dst = data;
+		dst++;
+	}
+}
+
+static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+{
+	while (--count >= 0) {
+		u32 data = __mem_readl(addr);
+		*dst = data;
+		dst++;
+	}
+}
+
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+	while (--count >= 0) {
+		__mem_writeb(*src, addr);
+		src++;
+	}
+}
+
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+	while (--count >= 0) {
+		__mem_writew(*src, addr);
+		src++;
+	}
+}
+
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+	while (--count >= 0) {
+		__mem_writel(*src, addr);
+		src++;
+	}
+}
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	mmio_insb(addr, dst, count);
+}
+
+EXPORT_SYMBOL(ioread8_rep);
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	mmio_insw(addr, dst, count);
+}
+
+EXPORT_SYMBOL(ioread16_rep);
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	mmio_insl(addr, dst, count);
+}
+
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	mmio_outsb(addr, src, count);
+}
+
+EXPORT_SYMBOL(iowrite8_rep);
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	mmio_outsw(addr, src, count);
+}
+
+EXPORT_SYMBOL(iowrite16_rep);
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	mmio_outsl(addr, src, count);
+}
+
+EXPORT_SYMBOL(iowrite32_rep);
+
+/*
+ * Create a virtual mapping cookie for an IO port range
+ *
+ * This uses the same mapping area as the in/out family, which has to be set up
+ * by the platform initialization code.
+ *
+ * Just to make matters somewhat more interesting, on MIPS systems with
+ * multiple host bridges each bridge will have its own ioport address space.
+ */
+static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *) (mips_io_port_base + port);
+}
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	if (port > PIO_MASK)
+		return NULL;
+
+	return ioport_map_legacy(port, nr);
+}
+
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+
+EXPORT_SYMBOL(ioport_unmap);
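
Note: typical use of the cookie-based accessors defined above, written as a
hedged kernel-style sketch (it only compiles in kernel context; the 0x3f8
legacy-UART port range and the register offsets are example values only):

#include <linux/errno.h>
#include <linux/io.h>

/* Map eight legacy I/O ports, poke them through the generic accessors,
 * and drop the mapping again. */
static int demo_ports(void)
{
	void __iomem *regs = ioport_map(0x3f8, 8);
	unsigned int status;

	if (!regs)
		return -ENOMEM;

	status = ioread8(regs + 5);	/* read a status register */
	iowrite8(0x00, regs + 1);	/* write a control register */

	ioport_unmap(regs);
	return status ? 0 : -ENODEV;
}
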
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
new file mode 100644
index 0000000..05909d5
--- /dev/null
+++ b/arch/mips/lib/libgcc.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_LIBGCC_H
+#define __ASM_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+	int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+	int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+	struct DWstruct s;
+	long long ll;
+} DWunion;
+
+#endif /* __ASM_LIBGCC_H */
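
Note: the field order in DWstruct is what lets DWunion alias a long long with
its two 32-bit halves. A userspace check of that aliasing, assuming a
little-endian host (the big-endian layout above swaps the fields):

#include <assert.h>

struct DWstruct {
	int low, high;			/* little-endian layout */
};

typedef union {
	struct DWstruct s;
	long long ll;
} DWunion;

int main(void)
{
	DWunion u = { .ll = 0x1122334455667788LL };

	assert(u.s.high == 0x11223344);
	assert(u.s.low == 0x55667788);
	return 0;
}
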
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
new file mode 100644
index 0000000..3645474
--- /dev/null
+++ b/arch/mips/lib/lshrdi3.c
@@ -0,0 +1,29 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+long long notrace __lshrdi3(long long u, word_type b)
+{
+	DWunion uu, w;
+	word_type bm;
+
+	if (b == 0)
+		return u;
+
+	uu.ll = u;
+	bm = 32 - b;
+
+	if (bm <= 0) {
+		w.s.high = 0;
+		w.s.low = (unsigned int) uu.s.high >> -bm;
+	} else {
+		const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+		w.s.high = (unsigned int) uu.s.high >> b;
+		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+	}
+
+	return w.ll;
+}
+
+EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
new file mode 100644
index 0000000..9245e17
--- /dev/null
+++ b/arch/mips/lib/memcpy.S
@@ -0,0 +1,739 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ *   memcpy/copy_user author: Mark Vandevoorde
+ * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+/*
+ * Hack to resolve longstanding prefetch issue
+ *
+ * Prefetching may be fatal on some systems if we're prefetching beyond the
+ * end of memory.  It's also a seriously bad idea on non
+ * dma-coherent systems.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+#ifdef CONFIG_MIPS_MALTA
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ *   - src and dst don't overlap
+ *   - src is readable
+ *   - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ *   copy_to_user
+ *     - src is readable  (no exceptions when reading src)
+ *   copy_from_user
+ *     - dst is writable  (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * include/asm-mips/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Prefetch type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler)			\
+	.if \mode == LEGACY_MODE;				\
+9:		insn reg, addr;					\
+		.section __ex_table,"a";			\
+		PTR	9b, handler;				\
+		.previous;					\
+	/* This is assembled in EVA mode */			\
+	.else;							\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";		\
+			PTR	9b, handler;			\
+			.previous;				\
+		.else;						\
+			/*					\
+			 *  Still in EVA, but no need for	\
+			 * exception handler or EVA insn	\
+			 */					\
+			insn reg, addr;				\
+		.endif;						\
+	.endif
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_64BIT
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
+#define ADD    daddu
+#define SUB    dsubu
+#define SRL    dsrl
+#define SRA    dsra
+#define SLL    dsll
+#define SLLV   dsllv
+#define SRLV   dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we are sharing code base with the mips32 tree (which use the o32 ABI
+ * register definitions). We need to redefine the register definitions from
+ * the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0	$8
+#define t1	$9
+#define t2	$10
+#define t3	$11
+#define t4	$12
+#define t5	$13
+#define t6	$14
+#define t7	$15
+
+#else
+
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
+#define ADD    addu
+#define SUB    subu
+#define SRL    srl
+#define SLL    sll
+#define SRA    sra
+#define SLLV   sllv
+#define SRLV   srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+
+#define _PREF(hint, addr, type)						\
+	.if \mode == LEGACY_MODE;					\
+		PREF(hint, addr);					\
+	.else;								\
+		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
+		    ((\to == USEROP) && (type == DST_PREFETCH));	\
+			/*						\
+			 * PREFE has only 9 bits for the offset		\
+			 * compared to PREF which has 16, so it may	\
+			 * need to use the $at register but this	\
+			 * register should remain intact because it's	\
+			 * used later on. Therefore use $v1.		\
+			 */						\
+			.set at=v1;					\
+			PREFE(hint, addr);				\
+			.set noat;					\
+		.else;							\
+			PREF(hint, addr);				\
+		.endif;							\
+	.endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST	LOADL
+#define STFIRST STORER
+#define STREST	STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST	LOADR
+#define STFIRST STOREL
+#define STREST	STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+#define UNIT(unit)  FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+	.text
+	.set	noreorder
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	.set	noat
+#else
+	.set	at=v1
+#endif
+
+	.align	5
+
+	/*
+	 * Macro to build the __copy_user common code
+	 * Arguments:
+	 * mode : LEGACY_MODE or EVA_MODE
+	 * from : Source operand. USEROP or KERNELOP
+	 * to   : Destination operand. USEROP or KERNELOP
+	 */
+	.macro __BUILD_COPY_USER mode, from, to
+
+	/* initialize __memcpy if this is the first time we execute this macro */
+	.ifnotdef __memcpy
+	.set __memcpy, 1
+	.hidden __memcpy /* make sure it does not leak */
+	.endif
+
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+#define rem t8
+
+	R10KCBARRIER(0(ra))
+	/*
+	 * The "issue break"s below are very approximate.
+	 * Issue delays for dcache fills will perturb the schedule, as will
+	 * load queue full replay traps, etc.
+	 *
+	 * If len < NBYTES use byte operations.
+	 */
+	PREFS(	0, 0(src) )
+	PREFD(	1, 0(dst) )
+	sltu	t2, len, NBYTES
+	and	t1, dst, ADDRMASK
+	PREFS(	0, 1*32(src) )
+	PREFD(	1, 1*32(dst) )
+	bnez	t2, .Lcopy_bytes_checklen\@
+	 and	t0, src, ADDRMASK
+	PREFS(	0, 2*32(src) )
+	PREFD(	1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
+	bnez	t1, .Ldst_unaligned\@
+	 nop
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+#else
+	or	t0, t0, t1
+	bnez	t0, .Lcopy_unaligned_bytes\@
+#endif
+	/*
+	 * use delay slot for fall-through
+	 * src and dst are aligned; need to compute rem
+	 */
+.Lboth_aligned\@:
+	 SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
+	beqz	t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
+	 and	rem, len, (8*NBYTES-1)	 # rem = len % (8*NBYTES)
+	PREFS(	0, 3*32(src) )
+	PREFD(	1, 3*32(dst) )
+	.align	4
+1:
+	R10KCBARRIER(0(ra))
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	SUB	len, len, 8*NBYTES
+	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p8u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p7u\@)
+	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
+	ADD	src, src, 8*NBYTES
+	ADD	dst, dst, 8*NBYTES
+	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+	PREFS(	0, 8*32(src) )
+	PREFD(	1, 8*32(dst) )
+	bne	len, rem, 1b
+	 nop
+
+	/*
+	 * len == rem == the number of bytes left to copy < 8*NBYTES
+	 */
+.Lcleanup_both_aligned\@:
+	beqz	len, .Ldone\@
+	 sltu	t0, len, 4*NBYTES
+	bnez	t0, .Lless_than_4units\@
+	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
+	/*
+	 * len >= 4*NBYTES
+	 */
+	LOAD( t0, UNIT(0)(src),	.Ll_exc\@)
+	LOAD( t1, UNIT(1)(src),	.Ll_exc_copy\@)
+	LOAD( t2, UNIT(2)(src),	.Ll_exc_copy\@)
+	LOAD( t3, UNIT(3)(src),	.Ll_exc_copy\@)
+	SUB	len, len, 4*NBYTES
+	ADD	src, src, 4*NBYTES
+	R10KCBARRIER(0(ra))
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
+	beqz	len, .Ldone\@
+	.set	noreorder
+.Lless_than_4units\@:
+	/*
+	 * rem = len % NBYTES
+	 */
+	beq	rem, len, .Lcopy_bytes\@
+	 nop
+1:
+	R10KCBARRIER(0(ra))
+	LOAD(t0, 0(src), .Ll_exc\@)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
+	bne	rem, len, 1b
+	.set	noreorder
+
+#ifndef CONFIG_CPU_MIPSR6
+	/*
+	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+	 * A loop would do only a byte at a time with possible branch
+	 * mispredicts.	 Can't do an explicit LOAD dst,mask,or,STORE
+	 * because can't assume read-access to dst.  Instead, use
+	 * STREST dst, which doesn't require read access to dst.
+	 *
+	 * This code should perform better than a simple loop on modern,
+	 * wide-issue mips processors because the code has fewer branches and
+	 * more instruction-level parallelism.
+	 */
+#define bits t2
+	beqz	len, .Ldone\@
+	 ADD	t1, dst, len	# t1 is just past last byte of dst
+	li	bits, 8*NBYTES
+	SLL	rem, len, 3	# rem = number of bits to keep
+	LOAD(t0, 0(src), .Ll_exc\@)
+	SUB	bits, bits, rem # bits = number of bits to discard
+	SHIFT_DISCARD t0, t0, bits
+	STREST(t0, -1(t1), .Ls_exc\@)
+	jr	ra
+	 move	len, zero
+.Ldst_unaligned\@:
+	/*
+	 * dst is unaligned
+	 * t0 = src & ADDRMASK
+	 * t1 = dst & ADDRMASK; T1 > 0
+	 * len >= NBYTES
+	 *
+	 * Copy enough bytes to align dst
+	 * Set match = (src and dst have same alignment)
+	 */
+#define match rem
+	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+	ADD	t2, zero, NBYTES
+	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+	SUB	t2, t2, t1	# t2 = number of bytes copied
+	xor	match, t0, t1
+	R10KCBARRIER(0(ra))
+	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	beq	len, t2, .Ldone\@
+	 SUB	len, len, t2
+	ADD	dst, dst, t2
+	beqz	match, .Lboth_aligned\@
+	 ADD	src, src, t2
+
+.Lsrc_unaligned_dst_aligned\@:
+	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
+	PREFS(	0, 3*32(src) )
+	beqz	t0, .Lcleanup_src_unaligned\@
+	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
+	PREFD(	1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+	R10KCBARRIER(0(ra))
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+	SUB	len, len, 4*NBYTES
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
+	ADD	src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+	nop				# improves slotting
+#endif
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
+	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
+	bne	len, rem, 1b
+	.set	noreorder
+
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
+	 and	rem, len, NBYTES-1  # rem = len % NBYTES
+	beq	rem, len, .Lcopy_bytes\@
+	 nop
+1:
+	R10KCBARRIER(0(ra))
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
+	bne	len, rem, 1b
+	.set	noreorder
+
+#endif /* !CONFIG_CPU_MIPSR6 */
+.Lcopy_bytes_checklen\@:
+	beqz	len, .Ldone\@
+	 nop
+.Lcopy_bytes\@:
+	/* 0 < len < NBYTES  */
+	R10KCBARRIER(0(ra))
+#define COPY_BYTE(N)			\
+	LOADB(t0, N(src), .Ll_exc\@);	\
+	SUB	len, len, 1;		\
+	beqz	len, .Ldone\@;		\
+	STOREB(t0, N(dst), .Ls_exc_p1\@)
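+	/* the STOREB sits in the beqz delay slot, so the byte loaded above is
+	 * still written even when len reaches zero and we branch to .Ldone */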
+
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+#ifdef USE_DOUBLE
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+#endif
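+	/* len was < NBYTES on entry, so at most NBYTES-1 bytes remain and the
+	 * last possible byte lives at offset NBYTES-2 */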
+	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
+	SUB	len, len, 1
+	jr	ra
+	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
+	jr	ra
+	 nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+	COPY_BYTE(6)
+	COPY_BYTE(7)
+	ADD	src, src, 8
+	b	1b
+	 ADD	dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
+	.if __memcpy == 1
+	END(memcpy)
+	.set __memcpy, 0
+	.hidden __memcpy
+	.endif
+
+.Ll_exc_copy\@:
+	/*
+	 * Copy bytes from src until faulting load address (or until a
+	 * lb faults)
+	 *
+	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+	 * may be more than a byte beyond the last address.
+	 * Hence, the lb below may get an exception.
+	 *
+	 * Assumes src < THREAD_BUADDR($28)
+	 */
+	LOADK	t0, TI_TASK($28)
+	 nop
+	LOADK	t0, THREAD_BUADDR(t0)
+1:
+	LOADB(t1, 0(src), .Ll_exc\@)
+	ADD	src, src, 1
+	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 1
+	bne	src, t0, 1b
+	.set	noreorder
+.Ll_exc\@:
+	LOADK	t0, TI_TASK($28)
+	 nop
+	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	 nop
+	SUB	len, AT, t0		# len number of uncopied bytes
+	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
+	/*
+	 * Here's where we rely on src and dst being incremented in tandem,
+	 *   See (3) above.
+	 * dst += (fault addr - src) to put dst at first byte to clear
+	 */
+	ADD	dst, t0			# compute start address in a1
+	SUB	dst, src
+	/*
+	 * Clear len bytes starting at dst.  Can't call __bzero because it
+	 * might modify len.  An inefficient loop for these rare times...
+	 */
+	.set	reorder				/* DADDI_WAR */
+	SUB	src, len, 1
+	beqz	len, .Ldone\@
+	.set	noreorder
+1:	sb	zero, 0(dst)
+	ADD	dst, dst, 1
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	bnez	src, 1b
+	 SUB	src, src, 1
+#else
+	.set	push
+	.set	noat
+	li	v1, 1
+	bnez	src, 1b
+	 SUB	src, src, v1
+	.set	pop
+#endif
+	jr	ra
+	 nop
+
+
+#define SEXC(n)							\
+	.set	reorder;			/* DADDI_WAR */ \
+.Ls_exc_p ## n ## u\@:						\
+	ADD	len, len, n*NBYTES;				\
+	jr	ra;						\
+	.set	noreorder
+
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+.Ls_exc_p1\@:
+	.set	reorder				/* DADDI_WAR */
+	ADD	len, len, 1
+	jr	ra
+	.set	noreorder
+.Ls_exc\@:
+	jr	ra
+	 nop
+	.endm
+
+	.align	5
+LEAF(memmove)
+	ADD	t0, a0, a2
+	ADD	t1, a1, a2
+	sltu	t0, a1, t0			# dst + len <= src -> memcpy
+	sltu	t1, a0, t1			# dst >= src + len -> memcpy
+	and	t0, t1
+	beqz	t0, .L__memcpy
+	 move	v0, a0				/* return value */
+	beqz	a2, .Lr_out
+	END(memmove)
+
+	/* fall through to __rmemcpy */
+LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
+	 sltu	t0, a1, a0
+	beqz	t0, .Lr_end_bytes_up		# src >= dst
+	 nop
+	ADD	a0, a2				# dst = dst + len
+	ADD	a1, a2				# src = src + len
+
+.Lr_end_bytes:
+	R10KCBARRIER(0(ra))
+	lb	t0, -1(a1)
+	SUB	a2, a2, 0x1
+	sb	t0, -1(a0)
+	SUB	a1, a1, 0x1
+	.set	reorder				/* DADDI_WAR */
+	SUB	a0, a0, 0x1
+	bnez	a2, .Lr_end_bytes
+	.set	noreorder
+
+.Lr_out:
+	jr	ra
+	 move	a2, zero
+
+.Lr_end_bytes_up:
+	R10KCBARRIER(0(ra))
+	lb	t0, (a1)
+	SUB	a2, a2, 0x1
+	sb	t0, (a0)
+	ADD	a1, a1, 0x1
+	.set	reorder				/* DADDI_WAR */
+	ADD	a0, a0, 0x1
+	bnez	a2, .Lr_end_bytes_up
+	.set	noreorder
+
+	jr	ra
+	 move	a2, zero
+	END(__rmemcpy)
+
+/*
+ * t6 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+	b	__copy_user_common
+	li	t6, 1
+	END(__copy_user_inatomic)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+	.align	5
+LEAF(memcpy)					/* a0=dst a1=src a2=len */
+	move	v0, dst				/* return value */
+.L__memcpy:
+FEXPORT(__copy_user)
+	li	t6, 0	/* not inatomic */
+__copy_user_common:
+	/* Legacy Mode, user <-> user */
+	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+LEAF(__copy_user_inatomic_eva)
+	b       __copy_from_user_common
+	li	t6, 1
+	END(__copy_user_inatomic_eva)
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__copy_from_user_eva)
+	li	t6, 0	/* not inatomic */
+__copy_from_user_common:
+	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__copy_from_user_eva)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__copy_to_user_eva)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__copy_to_user_eva)
+
+/*
+ * __copy_in_user (EVA)
+ */
+
+LEAF(__copy_in_user_eva)
+__BUILD_COPY_USER EVA_MODE USEROP USEROP
+END(__copy_in_user_eva)
+
+#endif
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
new file mode 100644
index 0000000..8f0019a
--- /dev/null
+++ b/arch/mips/lib/memset.S
@@ -0,0 +1,295 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#if LONGSIZE == 4
+#define LONG_S_L swl
+#define LONG_S_R swr
+#else
+#define LONG_S_L sdl
+#define LONG_S_R sdr
+#endif
+
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef  LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+
+/*
+ * No need to protect it with EVA #ifdefery. The generated block of code
+ * will never be assembled if EVA is not enabled.
+ */
+#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
+#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
+
+#define EX(insn,reg,addr,handler)			\
+	.if \mode == LEGACY_MODE;			\
+9:		insn	reg, addr;			\
+	.else;						\
+9:		___BUILD_EVA_INSN(insn, reg, addr);	\
+	.endif;						\
+	.section __ex_table,"a";			\
+	PTR	9b, handler;				\
+	.previous
+
+	.macro	f_fill64 dst, offset, val, fixup, mode
+	EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+	EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+	EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
+#endif
+	.endm
+
+	.set	noreorder
+	.align	5
+
+	/*
+	 * Macro to generate the __bzero{,_user} symbol
+	 * Arguments:
+	 * mode: LEGACY_MODE or EVA_MODE
+	 */
+	.macro __BUILD_BZERO mode
+	/* Initialize __memset if this is the first time we call this macro */
+	.ifnotdef __memset
+	.set __memset, 1
+	.hidden __memset /* Make sure it does not leak */
+	.endif
+
+	sltiu		t0, a2, STORSIZE	/* very small region? */
+	bnez		t0, .Lsmall_memset\@
+	andi		t0, a0, STORMASK	/* aligned? */
+
+#ifdef CONFIG_CPU_MICROMIPS
+	move		t8, a1			/* used by 'swp' instruction */
+	move		t9, a1
+#endif
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	beqz		t0, 1f
+	PTR_SUBU	t0, STORSIZE		/* alignment in bytes */
+#else
+	.set		noat
+	li		AT, STORSIZE
+	beqz		t0, 1f
+	PTR_SUBU	t0, AT			/* alignment in bytes */
+	.set		at
+#endif
+
+#ifndef CONFIG_CPU_MIPSR6
+	R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
+#else
+	EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
+#endif
+	PTR_SUBU	a0, t0			/* long align ptr */
+	PTR_ADDU	a2, t0			/* correct size */
+
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)				\
+	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
+	beqz		t0, 0f;			\
+	PTR_ADDU	t0, 1;
+
+	PTR_ADDU	a2, t0			/* correct size */
+	PTR_ADDU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+	ori		a0, STORMASK
+	xori		a0, STORMASK
+	PTR_ADDIU	a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
+1:	ori		t1, a2, 0x3f		/* # of full blocks */
+	xori		t1, 0x3f
+	beqz		t1, .Lmemset_partial\@	/* no block to fill */
+	andi		t0, a2, 0x40-STORSIZE
+
+	PTR_ADDU	t1, a0			/* end address */
+	.set		reorder
+1:	PTR_ADDIU	a0, 64
+	R10KCBARRIER(0(ra))
+	f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
+	bne		t1, a0, 1b
+	.set		noreorder
+
+.Lmemset_partial\@:
+	R10KCBARRIER(0(ra))
+	PTR_LA		t1, 2f			/* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+	LONG_SRL	t7, t0, 1
+#endif
+#if LONGSIZE == 4
+	PTR_SUBU	t1, FILLPTRG
+#else
+	.set		noat
+	LONG_SRL	AT, FILLPTRG, 1
+	PTR_SUBU	t1, AT
+	.set		at
+#endif
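+	/* jump backwards into the f_fill64 expansion below so that only the
+	 * stores covering the remaining t0 bytes are executed */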
+	jr		t1
+	PTR_ADDU	a0, t0			/* dest ptr */
+
+	.set		push
+	.set		noreorder
+	.set		nomacro
+	/* ... but first do longs ... */
+	f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
+2:	.set		pop
+	andi		a2, STORMASK		/* At most one long to go */
+
+	beqz		a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
+	PTR_ADDU	a0, a2			/* What's left */
+	R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+	EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
+#else
+	EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
+#endif
+#else
+	PTR_SUBU	t0, $0, a2
+	PTR_ADDIU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
+1:	jr		ra
+	move		a2, zero
+
+.Lsmall_memset\@:
+	beqz		a2, 2f
+	PTR_ADDU	t1, a0, a2
+
+1:	PTR_ADDIU	a0, 1			/* fill bytewise */
+	R10KCBARRIER(0(ra))
+	bne		t1, a0, 1b
+	sb		a1, -1(a0)
+
+2:	jr		ra			/* done */
+	move		a2, zero
+	.if __memset == 1
+	END(memset)
+	.set __memset, 0
+	.hidden __memset
+	.endif
+
+.Lbyte_fixup\@:
+	PTR_SUBU	a2, $0, t0
+	jr		ra
+	 PTR_ADDIU	a2, 1
+
+.Lfirst_fixup\@:
+	jr	ra
+	nop
+
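+/* a store in the 64-byte block loop faulted: recompute a2 as the number
+ * of bytes left unset, using the fault address saved in thread.buaddr */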
+.Lfwd_fixup\@:
+	PTR_L		t0, TI_TASK($28)
+	andi		a2, 0x3f
+	LONG_L		t0, THREAD_BUADDR(t0)
+	LONG_ADDU	a2, t1
+	jr		ra
+	LONG_SUBU	a2, t0
+
+.Lpartial_fixup\@:
+	PTR_L		t0, TI_TASK($28)
+	andi		a2, STORMASK
+	LONG_L		t0, THREAD_BUADDR(t0)
+	LONG_ADDU	a2, t1
+	jr		ra
+	LONG_SUBU	a2, t0
+
+.Llast_fixup\@:
+	jr		ra
+	andi		v1, a2, STORMASK
+
+	.endm
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+
+LEAF(memset)
+	beqz		a1, 1f
+	move		v0, a0			/* result */
+
+	andi		a1, 0xff		/* spread fillword */
+	LONG_SLL		t1, a1, 8
+	or		a1, t1
+	LONG_SLL		t1, a1, 16
+#if LONGSIZE == 8
+	or		a1, t1
+	LONG_SLL		t1, a1, 32
+#endif
+	or		a1, t1
+1:
+#ifndef CONFIG_EVA
+FEXPORT(__bzero)
+#else
+FEXPORT(__bzero_kernel)
+#endif
+	__BUILD_BZERO LEGACY_MODE
+
+#ifdef CONFIG_EVA
+LEAF(__bzero)
+	__BUILD_BZERO EVA_MODE
+END(__bzero)
+#endif
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 0000000..272af8a
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,141 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/stringify.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating on only the IE bit is not enough.
+ *
+ * If the mfc0 $12 follows a store, is the last instruction of a
+ * page, and fetching the next instruction causes a TLB miss, the
+ * result of the mfc0 might wrongly contain the EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask the EXL bit of the result or place a nop before the mfc0.
+ */
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	noat						\n"
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+	"	mfc0	%[flags], $12					\n"
+	"	ori	$1, %[flags], 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
+
+	preempt_enable();
+
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+
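+	/*
+	 * Clear the low Status bits (which also scrubs a spuriously set EXL,
+	 * see the TX49 note above) and OR back in the saved IE bit from flags.
+	 */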
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
new file mode 100644
index 0000000..cfcbb52
--- /dev/null
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -0,0 +1,74 @@
+/*
+ * Dump R3000 TLB for debugging purposes.
+ *
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Harald Koerfgen
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbdebug.h>
+
+extern int r3k_have_wired_reg;
+
+void dump_tlb_regs(void)
+{
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("EntryHi  : %0lx\n", read_c0_entryhi());
+	pr_info("EntryLo  : %0lx\n", read_c0_entrylo0());
+	if (r3k_have_wired_reg)
+		pr_info("Wired    : %0x\n", read_c0_wired());
+}
+
+static void dump_tlb(int first, int last)
+{
+	int	i;
+	unsigned int asid;
+	unsigned long entryhi, entrylo0;
+
+	asid = read_c0_entryhi() & ASID_MASK;
+
+	for (i = first; i <= last; i++) {
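+		/* the R3000 Index register keeps the entry number in bits 13:8 */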
+		write_c0_index(i<<8);
+		__asm__ __volatile__(
+			".set\tnoreorder\n\t"
+			"tlbr\n\t"
+			"nop\n\t"
+			".set\treorder");
+		entryhi	 = read_c0_entryhi();
+		entrylo0 = read_c0_entrylo0();
+
+		/* Unused entries have a virtual address of KSEG0.  */
+		if ((entryhi & PAGE_MASK) != KSEG0 &&
+		    (entrylo0 & R3K_ENTRYLO_G ||
+		     (entryhi & ASID_MASK) == asid)) {
+			/*
+			 * Only print entries in use
+			 */
+			printk("Index: %2d ", i);
+
+			printk("va=%08lx asid=%08lx"
+			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
+			       entryhi & PAGE_MASK,
+			       entryhi & ASID_MASK,
+			       entrylo0 & PAGE_MASK,
+			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+			       (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+			       (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+		}
+	}
+	printk("\n");
+
+	write_c0_entryhi(asid);
+}
+
+void dump_tlb_all(void)
+{
+	dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
new file mode 100644
index 0000000..929bbac
--- /dev/null
+++ b/arch/mips/lib/strlen_user.S
@@ -0,0 +1,61 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler)			\
+9:	insn	reg, addr;				\
+	.section __ex_table,"a";			\
+	PTR	9b, handler;				\
+	.previous
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+	.macro __BUILD_STRLEN_ASM func
+LEAF(__strlen_\func\()_asm)
+	LONG_L		v0, TI_ADDR_LIMIT($28)	# pointer ok?
+	and		v0, a0
+	bnez		v0, .Lfault\@
+
+	move		v0, a0
+.ifeqs "\func", "kernel"
+1:	EX(lbu, v1, (v0), .Lfault\@)
+.else
+1:	EX(lbue, v1, (v0), .Lfault\@)
+.endif
+	PTR_ADDIU	v0, 1
+	bnez		v1, 1b
+	PTR_SUBU	v0, a0
+	jr		ra
+	END(__strlen_\func\()_asm)
+
+.Lfault\@:	move		v0, zero
+	jr		ra
+	.endm
+
+#ifndef CONFIG_EVA
+	/* Set aliases */
+	.global __strlen_user_asm
+	.set __strlen_user_asm, __strlen_kernel_asm
+#endif
+
+__BUILD_STRLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+	.set push
+	.set eva
+__BUILD_STRLEN_ASM user
+	.set pop
+#endif
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
new file mode 100644
index 0000000..3c32baf
--- /dev/null
+++ b/arch/mips/lib/strncpy_user.S
@@ -0,0 +1,84 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler)			\
+9:	insn	reg, addr;				\
+	.section __ex_table,"a";			\
+	PTR	9b, handler;				\
+	.previous
+
+/*
+ * Returns: -EFAULT if an exception occurs before the terminator, N if the
+ * entire buffer is filled, else strlen.
+ */
+
+/*
+ * Ugly special case we have to check for: we might get passed a user-space
+ * pointer which wraps into the kernel space.  We don't deal with that; if
+ * it happens, at most some bytes of the exception handlers will be copied.
+ */
+
+	.macro __BUILD_STRNCPY_ASM func
+LEAF(__strncpy_from_\func\()_asm)
+	LONG_L		v0, TI_ADDR_LIMIT($28)	# pointer ok?
+	and		v0, a1
+	bnez		v0, .Lfault\@
+
+FEXPORT(__strncpy_from_\func\()_nocheck_asm)
+	move		t0, zero
+	move		v1, a1
+.ifeqs "\func","kernel"
+1:	EX(lbu, v0, (v1), .Lfault\@)
+.else
+1:	EX(lbue, v0, (v1), .Lfault\@)
+.endif
+	PTR_ADDIU	v1, 1
+	R10KCBARRIER(0(ra))
+	sb		v0, (a0)
+	beqz		v0, 2f
+	PTR_ADDIU	t0, 1
+	PTR_ADDIU	a0, 1
+	bne		t0, a2, 1b
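+	/* fault if a1 + t0 crossed a sign-bit boundary (a user pointer wrapping
+	 * into the kernel half), the case described in the header comment */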
+2:	PTR_ADDU	v0, a1, t0
+	xor		v0, a1
+	bltz		v0, .Lfault\@
+	move		v0, t0
+	jr		ra			# return n
+	END(__strncpy_from_\func\()_asm)
+
+.Lfault\@:
+	li		v0, -EFAULT
+	jr		ra
+
+	.section	__ex_table,"a"
+	PTR		1b, .Lfault\@
+	.previous
+
+	.endm
+
+#ifndef CONFIG_EVA
+	/* Set aliases */
+	.global __strncpy_from_user_asm
+	.global __strncpy_from_user_nocheck_asm
+	.set __strncpy_from_user_asm, __strncpy_from_kernel_asm
+	.set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNCPY_ASM kernel
+
+#ifdef CONFIG_EVA
+	.set push
+	.set eva
+__BUILD_STRNCPY_ASM user
+	.set pop
+#endif
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
new file mode 100644
index 0000000..77e6494
--- /dev/null
+++ b/arch/mips/lib/strnlen_user.S
@@ -0,0 +1,83 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (c) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler)			\
+9:	insn	reg, addr;				\
+	.section __ex_table,"a";			\
+	PTR	9b, handler;				\
+	.previous
+
+/*
+ * Return the size of a string including the ending NUL character up to a
+ * maximum of a1 or 0 in case of error.
+ *
+ * Note: for performance reasons we deliberately accept that a user may
+ *	 make strlen_user and strnlen_user access the first few KSEG0
+ *	 bytes.	 There's nothing secret there.	On 64-bit accessing beyond
+ *	 the maximum is a tad hairier ...
+ */
+	.macro __BUILD_STRNLEN_ASM func
+LEAF(__strnlen_\func\()_asm)
+	LONG_L		v0, TI_ADDR_LIMIT($28)	# pointer ok?
+	and		v0, a0
+	bnez		v0, .Lfault\@
+
+FEXPORT(__strnlen_\func\()_nocheck_asm)
+	move		v0, a0
+	PTR_ADDU	a1, a0			# stop pointer
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+	.set		noat
+	li		AT, 1
+#endif
+	beq		v0, a1, 1f		# limit reached?
+.ifeqs "\func", "kernel"
+	EX(lb, t0, (v0), .Lfault\@)
+.else
+	EX(lbe, t0, (v0), .Lfault\@)
+.endif
+	.set		noreorder
+	bnez		t0, 1b
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+	 PTR_ADDIU	v0, 1
+#else
+	 PTR_ADDU	v0, AT
+	.set		at
+#endif
+	.set		reorder
+	PTR_SUBU	v0, a0
+	jr		ra
+	END(__strnlen_\func\()_asm)
+
+.Lfault\@:
+	move		v0, zero
+	jr		ra
+	.endm
+
+#ifndef CONFIG_EVA
+	/* Set aliases */
+	.global __strnlen_user_asm
+	.global __strnlen_user_nocheck_asm
+	.set __strnlen_user_asm, __strnlen_kernel_asm
+	.set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+	.set push
+	.set eva
+__BUILD_STRNLEN_ASM user
+	.set pop
+#endif
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
new file mode 100644
index 0000000..bd599f5
--- /dev/null
+++ b/arch/mips/lib/ucmpdi2.c
@@ -0,0 +1,21 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
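+/*
+ * libgcc-style 64-bit unsigned compare: returns 0 if a < b, 1 if a == b
+ * and 2 if a > b.
+ */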
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	const DWunion au = {.ll = a};
+	const DWunion bu = {.ll = b};
+
+	if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
+		return 0;
+	else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
+		return 2;
+	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+		return 0;
+	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+		return 2;
+	return 1;
+}
+
+EXPORT_SYMBOL(__ucmpdi2);
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
new file mode 100644
index 0000000..09d5dee
--- /dev/null
+++ b/arch/mips/lib/uncached.c
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Thiemo Seufer
+ * Copyright (C) 2005  MIPS Technologies, Inc.	All rights reserved.
+ *	Author: Maciej W. Rozycki <macro@mips.com>
+ */
+
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+#ifndef CKSEG2
+#define CKSEG2 CKSSEG
+#endif
+#ifndef TO_PHYS_MASK
+#define TO_PHYS_MASK -1
+#endif
+
+/*
+ * FUNC is executed in one of the uncached segments, depending on its
+ * original address as follows:
+ *
+ * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
+ *    segment used is CKSEG1.
+ * 2. If the original address is in XKPHYS, then the uncached segment
+ *    used is XKPHYS(2).
+ * 3. Otherwise it's a bug.
+ *
+ * The same remapping is done with the stack pointer.  Stack handling
+ * works because we don't handle stack arguments or more complex return
+ * values, so we can avoid sharing the same stack area between a cached
+ * and the uncached mode.
+ */
+unsigned long run_uncached(void *func)
+{
+	register long sp __asm__("$sp");
+	register long ret __asm__("$2");
+	long lfunc = (long)func, ufunc;
+	long usp;
+
+	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
+		usp = CKSEG1ADDR(sp);
+#ifdef CONFIG_64BIT
+	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+		 (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
+		usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+				     XKPHYS_TO_PHYS((long long)sp));
+#endif
+	else {
+		BUG();
+		usp = sp;
+	}
+	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
+		ufunc = CKSEG1ADDR(lfunc);
+#ifdef CONFIG_64BIT
+	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
+		ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+				       XKPHYS_TO_PHYS((long long)lfunc));
+#endif
+	else {
+		BUG();
+		ufunc = lfunc;
+	}
+
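+	/*
+	 * Call func on the uncached stack: stash sp in $16 (s0, callee-saved,
+	 * so func preserves it), switch sp to usp, jalr func, then restore sp.
+	 */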
+	__asm__ __volatile__ (
+		"	move	$16, $sp\n"
+		"	move	$sp, %1\n"
+		"	jalr	%2\n"
+		"	move	$sp, $16"
+		: "=r" (ret)
+		: "r" (usp), "r" (ufunc)
+		: "$16", "$31");
+
+	return ret;
+}