File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 0000000..0b10ef2
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,52 @@
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 0000000..2c30a01
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR	0x61	/* DCCM Base Addr */
+#define ARC_REG_CRC_BCR		0x62
+#define ARC_REG_VECBASE_BCR	0x68
+#define ARC_REG_PERIBASE_BCR	0x69
+#define ARC_REG_FP_BCR		0x6B	/* ARCompact: Single-Precision FPU */
+#define ARC_REG_DPFP_BCR	0x6C	/* ARCompact: Dbl Precision FPU */
+#define ARC_REG_FP_V2_BCR	0xc8	/* ARCv2 FPU */
+#define ARC_REG_SLC_BCR		0xce
+#define ARC_REG_DCCM_BCR	0x74	/* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR	0x75
+#define ARC_REG_AP_BCR		0x76
+#define ARC_REG_ICCM_BCR	0x78
+#define ARC_REG_XY_MEM_BCR	0x79
+#define ARC_REG_MAC_BCR		0x7a
+#define ARC_REG_MUL_BCR		0x7b
+#define ARC_REG_SWAP_BCR	0x7c
+#define ARC_REG_NORM_BCR	0x7d
+#define ARC_REG_MIXMAX_BCR	0x7e
+#define ARC_REG_BARREL_BCR	0x7f
+#define ARC_REG_D_UNCACH_BCR	0x6A
+#define ARC_REG_BPU_BCR		0xc0
+#define ARC_REG_ISA_CFG_BCR	0xc1
+#define ARC_REG_RTT_BCR		0xF2
+#define ARC_REG_IRQ_BCR		0xF3
+#define ARC_REG_SMART_BCR	0xFF
+#define ARC_REG_CLUSTER_BCR	0xcf
+
+/* status32 Bits Positions */
+#define STATUS_AE_BIT		5	/* Exception active */
+#define STATUS_DE_BIT		6	/* PC is in delay slot */
+#define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_L_BIT		12	/* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
+#define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_L_MASK		(1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#ifdef CONFIG_ISA_ARCOMPACT
+#define ECR_V_MEM_ERR			0x01
+#define ECR_V_INSN_ERR			0x02
+#define ECR_V_MACH_CHK			0x20
+#define ECR_V_ITLB_MISS			0x21
+#define ECR_V_DTLB_MISS			0x22
+#define ECR_V_PROTV			0x23
+#define ECR_V_TRAP			0x25
+#else
+#define ECR_V_MEM_ERR			0x01
+#define ECR_V_INSN_ERR			0x02
+#define ECR_V_MACH_CHK			0x03
+#define ECR_V_ITLB_MISS			0x04
+#define ECR_V_DTLB_MISS			0x05
+#define ECR_V_PROTV			0x06
+#define ECR_V_TRAP			0x09
+#endif
+
+/* DTLB Miss and Protection Violation Cause Codes */
+
+#define ECR_C_PROTV_INST_FETCH		0x00
+#define ECR_C_PROTV_LOAD		0x01
+#define ECR_C_PROTV_STORE		0x02
+#define ECR_C_PROTV_XCHG		0x03
+#define ECR_C_PROTV_MISALIG_DATA	0x04
+
+#define ECR_C_BIT_PROTV_MISALIG_DATA	10
+
+/* Machine Check Cause Code Values */
+#define ECR_C_MCHK_DUP_TLB		0x01
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS		8
+#define ECR_C_BIT_DTLB_ST_MISS		9
+
+/* Auxiliary registers */
+#define AUX_IDENTITY		4
+#define AUX_INTR_VEC_BASE	0x25
+#define AUX_NON_VOL		0x5e
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT         0x300
+#define ARC_AUX_DPFP_1L         0x301
+#define ARC_AUX_DPFP_1H         0x302
+#define ARC_AUX_DPFP_2L         0x303
+#define ARC_AUX_DPFP_2H         0x304
+#define ARC_AUX_DPFP_STAT       0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ *      Inline ASM macros to read/write AUX Regs
+ *      Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg)	__builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val)		\
+		__builtin_arc_sr((unsigned int)(val), reg_immed)
+
+#else
+
+#define read_aux_reg(reg)		\
+({					\
+	unsigned int __ret;		\
+	__asm__ __volatile__(		\
+	"	lr    %0, [%1]"		\
+	: "=r"(__ret)			\
+	: "i"(reg));			\
+	__ret;				\
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ *    write_aux_reg(0x69, some_val);
+ * This generates tightest code.
+ */
+#define write_aux_reg(reg_imm, val)	\
+({					\
+	__asm__ __volatile__(		\
+	"	sr   %0, [%1]	\n"	\
+	:				\
+	: "ir"(val), "i"(reg_imm));	\
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ *      reg_num = 0x69
+ *      write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ *  memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val)		\
+({						\
+	unsigned int tmp;			\
+						\
+	__asm__ __volatile__(			\
+	"	ld   %0, [%2]	\n\t"		\
+	"	sr   %1, [%0]	\n\t"		\
+	: "=&r"(tmp)				\
+	: "r"(val), "memory"(&reg_in_var));	\
+})
+
+#endif
+
+#define READ_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	tmp = read_aux_reg(reg);			\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		into = *((typeof(into) *)&tmp);		\
+	} else {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+#define WRITE_AUX(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		tmp = (*(unsigned int *)&(into));	\
+		write_aux_reg(reg, tmp);		\
+	} else  {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
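+
+/*
+ * Example usage (illustrative):
+ *	struct bcr_identity ident;
+ *	READ_BCR(AUX_IDENTITY, ident);
+ * after which ident.family, ident.cpu_id etc. hold the decoded fields.
+ */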
+
+/* Helpers */
+#define TO_KB(bytes)		((bytes) >> 10)
+#define TO_MB(bytes)		(TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages)	((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages)	(PAGES_TO_KB(n_pages) >> 10)
+
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+	unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_isa {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
+		     pad1:11, atomic1:1, ver:8;
+#else
+	unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+		     ldd:1, pad2:4, div_rem:4;
+#endif
+};
+
+struct bcr_mpy {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
+#else
+	unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+	unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int start:8, pad2:8, sz:8, ver:8;
+#else
+	unsigned int ver:8, sz:8, pad2:8, start:8;
+#endif
+};
+
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int addr:24, ver:8;
+#else
+	unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int res:21, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* ARCompact: Both SP and DP FPU BCRs have same format */
+struct bcr_fp_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int fast:1, ver:8;
+#else
+	unsigned int ver:8, fast:1;
+#endif
+};
+
+struct bcr_fp_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8;
+#else
+	unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15;
+#endif
+};
+
+struct bcr_timer {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
+#else
+	unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
+#endif
+};
+
+struct bcr_bpu_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
+#else
+	unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
+#endif
+};
+
+struct bcr_bpu_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8;
+#else
+	unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6;
+#endif
+};
+
+struct bcr_generic {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:24, ver:8;
+#else
+	unsigned int ver:8, pad:24;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+	unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
+	unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
+};
+
+struct cpuinfo_arc_cache {
+	unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
+};
+
+struct cpuinfo_arc_bpu {
+	unsigned int ver, full, num_cache, num_pred;
+};
+
+struct cpuinfo_arc_ccm {
+	unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+	struct cpuinfo_arc_cache icache, dcache, slc;
+	struct cpuinfo_arc_mmu mmu;
+	struct cpuinfo_arc_bpu bpu;
+	struct bcr_identity core;
+	struct bcr_isa isa;
+	struct bcr_timer timers;
+	unsigned int vec_base;
+	struct cpuinfo_arc_ccm iccm, dccm;
+	struct {
+		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+			     fpu_sp:1, fpu_dp:1, pad2:6,
+			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
+			     pad4:8;
+	} extn;
+	struct bcr_mpy extn_mpy;
+	struct bcr_extn_xymem extn_xymem;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+static inline int is_isa_arcv2(void)
+{
+	return IS_ENABLED(CONFIG_ISA_ARCV2);
+}
+
+static inline int is_isa_arcompact(void)
+{
+	return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 0000000..dad1876
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 0000000..7730d30
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v)  READ_ONCE((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay = 1, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bz	4f			\n"				\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"4: ; --- success ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "+&r" (delay),[tmp] "=&r"	(tmp)				\
+
+#else	/* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bnz     1b			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS
+
+#endif
+
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned int val;				                \
+	SCOND_FAIL_RETRY_VAR_DEF                                        \
+									\
+	__asm__ __volatile__(						\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+	  [i]	"ir"	(i)						\
+	: "cc");							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned int val;				                \
+	SCOND_FAIL_RETRY_VAR_DEF                                        \
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val)						\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter),					\
+	  [i]	"ir"	(i)						\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return val;							\
+}
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization's sake */
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	/*
+	 * Independent of hardware support, all of the atomic_xxx() APIs need
+	 * to follow the same locking rules to make sure that a "hardware"
+	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+	 * sequence
+	 *
+	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+	 * requires the locking.
+	 */
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	WRITE_ONCE(v->counter, i);
+	atomic_ops_unlock(flags);
+}
+
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	atomic_ops_lock(flags);						\
+	v->counter c_op i;						\
+	atomic_ops_unlock(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	unsigned long temp;						\
+									\
+	/*								\
+	 * spin lock/unlock provides the needed smp_mb() before/after	\
+	 */								\
+	atomic_ops_lock(flags);						\
+	temp = v->counter;						\
+	temp c_op i;							\
+	v->counter = temp;						\
+	atomic_ops_unlock(flags);					\
+									\
+	return temp;							\
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u)					\
+({									\
+	int c, old;							\
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	c = atomic_read(v);						\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+		c = old;						\
+									\
+	smp_mb();							\
+									\
+	c;								\
+})
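+
+/*
+ * Illustrative use (typically reached via the linux/atomic.h wrappers):
+ *	old = __atomic_add_unless(&v, 1, 0);
+ * adds 1 to v unless it was 0, returning the prior value either way; this is
+ * the building block for atomic_inc_not_zero() below.
+ */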
+
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v)			atomic_add(1, v)
+#define atomic_dec(v)			atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i)			{ (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 0000000..a720998
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ *  - Operand supports fine grained load/store/load+store semantics
+ *  - Ensures that selected memory operation issued before it will complete
+ *    before any subsequent memory operation of same type
+ *  - DMB guarantees SMP as well as local barrier semantics
+ *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *    UP: barrier(), SMP: smp_*mb == *mb)
+ *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
+ *    in the general case. Plus it only provides full barrier.
+ */
+
+#define mb()	asm volatile("dmb 3\n" : : : "memory")
+#define rmb()	asm volatile("dmb 1\n" : : : "memory")
+#define wmb()	asm volatile("dmb 2\n" : : : "memory")
+
+#endif
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * ARCompact based cores (ARC700) only have SYNC instruction which is super
+ * heavy weight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb()	asm volatile("sync\n" : : : "memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 0000000..0352fb8
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
+
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
+
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned int temp;						\
+									\
+	m += nr >> 5;							\
+									\
+	nr &= 0x1f;							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%1]		\n"			\
+	"	" #asm_op " %0, %0, %2	\n"				\
+	"	scond       %0, [%1]		\n"			\
+	"	bnz         1b			\n"			\
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
+	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
+	  "ir"(nr)							\
+	: "cc");							\
+}
+
+/*
+ * Semantically:
+ *    Test the bit
+ *    if clear
+ *        set it and return 0 (old value)
+ *    else
+ *        return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, temp;					\
+									\
+	m += nr >> 5;							\
+									\
+	nr &= 0x1f;							\
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%2]	\n"				\
+	"	" #asm_op " %1, %0, %3	\n"				\
+	"	scond       %1, [%2]	\n"				\
+	"	bnz         1b		\n"				\
+	: "=&r"(old), "=&r"(temp)					\
+	: "r"(m), "ir"(nr)						\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return (old & (1 << nr)) != 0;					\
+}
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ *     This avoids extra code being generated for pointer arithmetic, since
+ *     the compiler cannot assume that the index is not negative
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
+ *     only consider bottom 5 bits of @nr, so NO need to mask them off.
+ *     (GCC Quirk: however for constant @nr we still need to do the masking
+ *             at compile time)
+ */
+
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long temp, flags;					\
+	m += nr >> 5;							\
+									\
+	/*								\
+	 * spin lock/unlock provide the needed smp_mb() before/after	\
+	 */								\
+	bitops_lock(flags);						\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << (nr & 0x1f));					\
+									\
+	bitops_unlock(flags);						\
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, flags;					\
+	m += nr >> 5;							\
+									\
+	bitops_lock(flags);						\
+									\
+	old = *m;							\
+	*m = old c_op (1UL << (nr & 0x1f));				\
+									\
+	bitops_unlock(flags);						\
+									\
+	return (old & (1UL << (nr & 0x1f))) != 0;			\
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+#define __BIT_OP(op, c_op, asm_op)					\
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
+{									\
+	unsigned long temp;						\
+	m += nr >> 5;							\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << (nr & 0x1f));				\
+}
+
+#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old;						\
+	m += nr >> 5;							\
+									\
+	old = *m;							\
+	*m = old c_op (1UL << (nr & 0x1f));				\
+									\
+	return (old & (1UL << (nr & 0x1f))) != 0;			\
+}
+
+#define BIT_OPS(op, c_op, asm_op)					\
+									\
+	/* set_bit(), clear_bit(), change_bit() */			\
+	BIT_OP(op, c_op, asm_op)					\
+									\
+	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+	TEST_N_BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __set_bit(), __clear_bit(), __change_bit() */		\
+	__BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+	__TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+	unsigned long mask;
+
+	addr += nr >> 5;
+
+	mask = 1UL << (nr & 0x1f);
+
+	return ((mask & *addr) != 0);
+}
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It can be 0 to 32, based on the number of leading 0's
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+	unsigned int res;
+
+	__asm__ __volatile__(
+	"	norm.f  %0, %1		\n"
+	"	mov.n   %0, 0		\n"
+	"	add.p   %0, %0, 1	\n"
+	: "=r"(res)
+	: "r"(x)
+	: "cc");
+
+	return res;
+}
+
+static inline int constant_fls(int x)
+{
+	int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+	if (__builtin_constant_p(x))
+	       return constant_fls(x);
+
+	return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+	if (!x)
+		return 0;
+	else
+		return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
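+
+/*
+ * (__t & -__t) isolates the lowest set bit, e.g. for x = 6 (0b110) it yields
+ * 2 (0b010), and fls(2) == 2, hence ffs(6) == 2 as expected.
+ */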
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+	if (!word)
+		return word;
+
+	return ffs(word) - 1;
+}
+
+#else	/* CONFIG_ISA_ARCV2 */
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
+	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
+	: "=r"(n)	/* Early clobber not needed */
+	: "r"(x)
+	: "cc");
+
+	return n;
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+	/* FLS insn has exactly same semantics as the API */
+	return	__builtin_arc_fls(x);
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+static inline __attribute__ ((const)) int ffs(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
+	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
+	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
+	: "=r"(n)	/* Early clobber not needed */
+	: "r"(x)
+	: "cc");
+
+	return n;
+}
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
+	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
+	: "=r"(n)
+	: "r"(x)
+	: "cc");
+
+	return n;
+
+}
+
+#endif	/* CONFIG_ISA_ARCOMPACT */
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x)	__ffs(~(x))
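+/* e.g. ffz(0x000000ff) == 8, the position of the first clear bit */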
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 0000000..ea022d4
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+			    unsigned long address);
+void die(const char *str, struct pt_regs *regs, unsigned long address);
+
+#define BUG()	do {								\
+	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+	dump_stack();								\
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 0000000..0ddd714
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case caches ($$) are not configured, set up a dummy number for the rest of the kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT		6
+#else
+#define L1_CACHE_SHIFT		CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define CACHE_LINE_MASK		(~(L1_CACHE_BYTES - 1))
+
+/*
+ * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF)
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE	0xc0000000
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr)	\
+({					\
+	unsigned int __ret;		\
+	__asm__ __volatile__(		\
+	"	ld.di %0, [%1]	\n"	\
+	: "=r"(__ret)			\
+	: "r"(ptr));			\
+	__ret;				\
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({					\
+	__asm__ __volatile__(		\
+	"	st.di %0, [%1]	\n"	\
+	:				\
+	: "r"(data), "r"(ptr));		\
+})
+
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void read_decode_cache_bcr(void);
+
+extern int ioc_exists;
+
+#endif	/* !__ASSEMBLY__ */
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
+#define ARC_REG_IC_IVIC		0x10
+#define ARC_REG_IC_CTRL		0x11
+#define ARC_REG_IC_IVIL		0x19
+#define ARC_REG_IC_PTAG		0x1E
+#define ARC_REG_IC_PTAG_HI	0x1F
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE   0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR		0x72	/* Build Config reg */
+#define ARC_REG_DC_IVDC		0x47
+#define ARC_REG_DC_CTRL		0x48
+#define ARC_REG_DC_IVDL		0x4A
+#define ARC_REG_DC_FLSH		0x4B
+#define ARC_REG_DC_FLDL		0x4C
+#define ARC_REG_DC_PTAG		0x5C
+#define ARC_REG_DC_PTAG_HI	0x5F
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH  0x40
+#define DC_CTRL_FLUSH_STATUS    0x100
+
+/* System-level cache (L2 cache) related Auxiliary registers */
+#define ARC_REG_SLC_CFG		0x901
+#define ARC_REG_SLC_CTRL	0x903
+#define ARC_REG_SLC_FLUSH	0x904
+#define ARC_REG_SLC_INVALIDATE	0x905
+#define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_START1	0x915
+#define ARC_REG_SLC_RGN_END	0x916
+#define ARC_REG_SLC_RGN_END1	0x917
+
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_IM		0x040
+#define SLC_CTRL_DISABLE	0x001
+#define SLC_CTRL_BUSY		0x100
+#define SLC_CTRL_RGN_OP_INV	0x200
+
+/* IO coherency related Auxiliary registers */
+#define ARC_REG_IO_COH_ENABLE	0x500
+#define ARC_REG_IO_COH_PARTIAL	0x501
+#define ARC_REG_IO_COH_AP0_BASE	0x508
+#define ARC_REG_IO_COH_AP0_SIZE	0x509
+
+#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 0000000..56aeb5e
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ *   -flush_cache_dup_mm (fork)
+ *   -likewise for flush_cache_mm (exit/execve)
+ *   -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ *  vineetg: April 2008
+ *   -Added a critical CacheLine flush to copy_to_user_page( ) which
+ *     was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/shmparam.h>
+
+/*
+ * Semantically we need this because icache doesn't snoop dcache/dma.
+ * However ARC Cache flush requires paddr as well as vaddr, the latter not available
+ * in the flush_icache_page() API. So we no-op it but do the equivalent work
+ * in update_mmu_cache()
+ */
+#define flush_icache_page(vma, page)
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+#define flush_cache_mm(mm)			/* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
+
+#else	/* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long u_vaddr);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
+ * This works around some PIO based drivers which don't call flush_dcache_page
+ * to record that they dirtied the dcache
+ */
+#define PG_dc_clean	PG_arch_1
+
+#define CACHE_COLORS_NUM	4
+#define CACHE_COLORS_MSK	(CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+}
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)				\
+({									\
+	cache_is_vipt_aliasing() ? 					\
+		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;		\
+})
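+
+/*
+ * e.g. assuming 8 KB pages and the 4 colors above, vaddrs 0x2000 apart land
+ * in different colors (not congruent), while vaddrs 0x8000 apart share a
+ * color and thus index into the same cache sets.
+ */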
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+do {									\
+	memcpy(dst, src, len);						\
+	if (vma->vm_flags & VM_EXEC)					\
+		__sync_icache_dcache((unsigned long)(dst), vaddr, len);	\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
+	memcpy(dst, src, len);						\
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 0000000..1095729
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke  <joern.rennecke@embecosm.com>: Jan 2012
+ *  -Insn Scheduling improvements to csum core routines.
+ *      = csum_fold( ) largely derived from ARM version.
 *      = ip_fast_csum( ) to have modulo scheduling
+ *  -gcc 4.4.x broke networking. Alias analysis needed to be primed;
+ *   worked around by adding a memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ *  -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ *	Fold a partial checksum
+ *
+ *  The two 16-bit halves of the 32-bit sum are added, any carry out of the
+ *  low half is added back, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+	unsigned r = s << 16 | s >> 16;	/* ror */
+	s = ~s;
+	s -= r;
+	return s >> 16;
+}
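+
+/*
+ * Worked example (illustrative): for s = 0x12345678,
+ *	r = 0x56781234, ~s = 0xedcba987, ~s - r = 0x97539753
+ * so the function returns 0x9753, i.e. ~(0x1234 + 0x5678) truncated to 16 bits.
+ */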
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	const void *ptr = iph;
+	unsigned int tmp, tmp2, sum;
+
+	__asm__(
+	"	ld.ab  %0, [%3, 4]		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"	sub    %1, %4, 2		\n"
+	"	lsr.f  lp_count, %1, 1		\n"
+	"	bcc    0f			\n"
+	"	add.f  %0, %0, %2		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"0:	lp     1f			\n"
+	"	ld.ab  %1, [%3, 4]		\n"
+	"	adc.f  %0, %0, %2		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"	adc.f  %0, %0, %1		\n"
+	"1:	adc.f  %0, %0, %2		\n"
+	"	add.cs %0,%0,1			\n"
+	: "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+	: "r"(ihl)
+	: "cc", "lp_count", "memory");
+
+	return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
+{
+	__asm__ __volatile__(
+	"	add.f %0, %0, %1	\n"
+	"	adc.f %0, %0, %2	\n"
+	"	adc.f %0, %0, %3	\n"
+	"	adc.f %0, %0, %4	\n"
+	"	adc   %0, %0, 0		\n"
+	: "+&r"(sum)
+	: "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	  "r"(len),
+#else
+	  "r"(len << 8),
+#endif
+	  "r"(htons(proto))
+	: "cc");
+
+	return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 0000000..bf9d29f
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still the better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+	return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..af7a2db
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	unsigned long prev;
+
+	/*
+	 * Explicit full memory barrier needed before/after as
+	 * LLOCK/SCOND themselves don't provide any such semantics
+	 */
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	brne    %0, %2, 2f	\n"
+	"	scond   %3, [%1]	\n"
+	"	bnz     1b		\n"
+	"2:				\n"
+	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
+	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
+	  "ir"(expected),
+	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
+	: "cc", "memory"); /* so that gcc knows memory is being written here */
+
+	smp_mb();
+
+	return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	unsigned long flags;
+	int prev;
+	volatile unsigned long *p = ptr;
+
+	/*
+	 * spin lock/unlock provide the needed smp_mb() before/after
+	 */
+	atomic_ops_lock(flags);
+	prev = *p;
+	if (prev == expected)
+		*p = new;
+	atomic_ops_unlock(flags);
+	return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+				(unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings
+ * which also happens to be atomic_ops_lock.
+ *
+ * Thus, despite being semantically different, the implementation of
+ * atomic_cmpxchg() is the same as that of cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
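+
+/*
+ * Example (illustrative):
+ *	old = cmpxchg(&val, 0, 1);
+ * atomically sets val to 1 only if it was 0, returning the prior value, so
+ * (old == 0) means this caller performed the 0 -> 1 transition.
+ */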
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+				   int size)
+{
+	extern unsigned long __xchg_bad_pointer(void);
+
+	switch (size) {
+	case 4:
+		smp_mb();
+
+		__asm__ __volatile__(
+		"	ex  %0, [%1]	\n"
+		: "+r"(val)
+		: "r"(ptr)
+		: "memory");
+
+		smp_mb();
+
+		return val;
+	}
+	return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+						 sizeof(*(ptr))))
+
+/*
+ * xchg() maps directly to ARC EX instruction which guarantees atomicity.
+ * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
+ * due to a subtle reason:
+ *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a lot
+ *    of kernel code which calls xchg()/cmpxchg() on the same data (see llist.h)
+ *    Hence xchg() needs to follow the same locking rules.
+ *
+ * Technically the lock is also needed for UP (boils down to irq save/restore)
+ * but we can cheat a bit: since cmpxchg()'s atomic_ops_lock() causes irqs to
+ * be disabled, it can't possibly be interrupted/preempted/clobbered by xchg().
+ * The other way around, xchg() is a single instruction anyway, so it can't be
+ * interrupted as such.
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with)			\
+({					\
+	unsigned long flags;		\
+	typeof(*(ptr)) old_val;		\
+					\
+	atomic_ops_lock(flags);		\
+	old_val = _xchg(ptr, with);	\
+	atomic_ops_unlock(flags);	\
+	old_val;			\
+})
+
+#else
+
+#define xchg(ptr, with)  _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ *         is natively "SMP safe", no serialization required).
+ *   UP  : other atomics disable IRQs, so no way a different-context atomic_xchg()
+ *         could clobber them. atomic_xchg() itself would be 1 insn, so it
+ *         can't be clobbered by others. Thus no serialization required when
+ *         atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 0000000..c2453ee
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ *  - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 0000000..d5da211
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using a pre-computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ *  -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ *  -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h>		/* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+	__asm__ __volatile__(
+	"	mov lp_count, %0	\n"
+	"	lp  1f			\n"
+	"	nop			\n"
+	"1:				\n"
+	:
+        : "r"(loops)
+        : "lp_count");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ *  -we have precomputed @loops_per_jiffy
+ *  -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ *  -Mathematically, if we multiply and divide a number by the same value, the
+ *   result remains unchanged:  In this case, we use 2^32
+ *  -> (loops_per_N_usec * 2^32 ) / 2^32
+ *  -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ *  -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ *  -Divide by 2^32 is simply a right shift by 32
+ *  -We simply need to ensure that the multiply per above eqn happens in
+ *   64-bit precision (if CPU doesn't support it - gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+	unsigned long loops;
+
+	/* (u64) cast ensures 64 bit MPY - real or emulated
+	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+	 */
+	loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
+
+	__delay(loops);
+}
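+
+/*
+ * Worked example (illustrative numbers): with HZ = 100 and
+ * loops_per_jiffy = 50000 (i.e. 5,000,000 delay loops per second),
+ * usecs = 10 gives
+ *	loops = (10 * 4295 * 100 * 50000) >> 32 = 214750000000 >> 32 = 50
+ * i.e. ~50 loops, as expected for 10 us at 5M loops/sec.
+ */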
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+				: __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 0000000..f1cce3d
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+	op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+	op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+	op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+	op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+	op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+	op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+	op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+	noflow,
+	direct_jump,
+	direct_call,
+	indirect_jump,
+	indirect_call,
+	invalid_instr
+};
+
+#define IS_BIT(word, n)		((word) & (1<<n))
+#define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))
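+/* e.g. BITS(0xf0, 4, 7) extracts bits 4..7 inclusive and yields 0xf */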
+
+#define MAJOR_OPCODE(word)	(BITS((word), 27, 31))
+#define MINOR_OPCODE(word)	(BITS((word), 16, 21))
+#define FIELD_A(word)		(BITS((word), 0, 5))
+#define FIELD_B(word)		((BITS((word), 12, 14)<<3) | \
+				(BITS((word), 24, 26)))
+#define FIELD_C(word)		(BITS((word), 6, 11))
+#define FIELD_u6(word)		FIELD_C(word)
+#define FIELD_s12(word)		sign_extend(((BITS((word), 0, 5) << 6) | \
+					BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word)		sign_extend(((BITS(word, 15, 15) << 8) | \
+					BITS(word, 16, 23)), 9)
+#define FIELD_s21(word)		sign_extend(((BITS(word, 6, 15) << 11) | \
+					(BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word)		sign_extend(((BITS(word, 0, 3) << 21) | \
+					(BITS(word, 6, 15) << 11) | \
+					(BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word)		((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word)		((BITS((word), 10, 10)<<3) | \
+				BITS((word), 8, 10))
+#define FIELD_S_C(word)		((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word)		((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word)	(BITS((word), 0, 4))
+#define FIELD_S_u6(word)	(BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word)	(BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word)	(BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word)	sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word)	sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word)	sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word)	sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word)	sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word)	sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L		0x00000100
+#define REG_LIMM		62
+
+struct disasm_state {
+	/* generic info */
+	unsigned long words[2];
+	int instr_len;
+	int major_opcode;
+	/* info for branch/jump */
+	int is_branch;
+	int target;
+	int delay_slot;
+	enum flow flow;
+	/* info for load/store */
+	int src1, src2, src3, dest, wb_reg;
+	int zz, aa, x, pref, di;
+	int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+	if (IS_BIT(value, (bits - 1)))
+		value |= (0xffffffff << bits);
+
+	return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+	uint16_t word = *((uint16_t *)addr);
+	int opcode = (word >> 11) & 0x1F;
+	return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+	int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+	*cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+		struct callee_regs *cregs);
+
+#endif	/* __ARC_DISASM_H__ */
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 0000000..2d28ba9
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,202 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+			    dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+			  dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+		       dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to be explicitly made
+ * consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+					   enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		dma_cache_inv(paddr, size);
+		break;
+	case DMA_TO_DEVICE:
+		dma_cache_wback(paddr, size);
+		break;
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv(paddr, size);
+		break;
+	default:
+		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+	}
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+			  enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir)			\
+do {							\
+	if (__builtin_constant_p(dir))			\
+		__inline_dma_cache_sync(addr, sz, dir);	\
+	else						\
+		__arc_dma_cache_sync(addr, sz, dir);	\
+}							\
+while (0);
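+
+/*
+ * e.g. in dma_map_single(dev, buf, len, DMA_TO_DEVICE) below, the direction
+ * is a compile-time constant, so the switch in __inline_dma_cache_sync()
+ * collapses to a single dma_cache_wback() call; a runtime-variable @dir
+ * instead goes through the out-of-line __arc_dma_cache_sync().
+ */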
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+	       enum dma_data_direction dir)
+{
+	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
+	return (dma_addr_t)cpu_addr;
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		 size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size,
+	     enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + offset;
+	return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+	       size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+	   int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+					       s->length, dir);
+
+	return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	     int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			size_t size, enum dma_data_direction dir)
+{
+	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction dir)
+{
+	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+	_dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	_dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
+		    enum dma_data_direction dir)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
+		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction dir)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
+		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+	/* Support 32 bit DMA mask exclusively */
+	return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+#endif
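
A minimal sketch of how a driver might use the streaming DMA API declared above, for a hypothetical device receive path ('dev', 'buf' and 'len' come from elsewhere; error handling omitted):

#include <linux/dma-mapping.h>

static void rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* hand the buffer to the device: cache made consistent per dir */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMAs into the buffer here ... */

	/* make the device's writes visible to the CPU before reading */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
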
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 0000000..ca7c451
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 0000000..51a99e2
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT		93
+
+#define EM_ARCV2		195	/* ARCv2 Cores */
+
+#define EM_ARC_INUSE		(IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
+					EM_ARCOMPACT : EM_ARCV2)
+
+/* ARC Relocations (kernel Modules only) */
+#define  R_ARC_32		0x4
+#define  R_ARC_32_ME		0x1B
+#define  R_ARC_S25H_PCREL	0x10
+#define  R_ARC_S25W_PCREL	0x11
+
+/* to set parameters in the core dumps */
+#define ELF_ARCH		EM_ARCOMPACT
+#define ELF_CLASS		ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA		ELFDATA2MSB
+#else
+#define ELF_DATA		ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ *  - we don't load something for the wrong architecture
+ *  - the userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch	elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader.  We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE		(2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI.  A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr)	((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP	(0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM	(NULL)
+
+#endif
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
new file mode 100644
index 0000000..aee1a77
--- /dev/null
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -0,0 +1,192 @@
+
+#ifndef __ASM_ARC_ENTRY_ARCV2_H
+#define __ASM_ARC_ENTRY_ARCV2_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/thread_info.h>	/* For THREAD_SIZE */
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE	called_from
+
+	; Before jumping to Interrupt Vector, hardware micro-ops did following:
+	;   1. SP auto-switched to kernel mode stack
+	;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1, K:0)
+	;   3. Auto saved: r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI, PC, STAT32
+	;
+	; Now manually save: r12, sp, fp, gp, r25
+
+	PUSH	r30
+	PUSH	r12
+
+	; Saving pt_regs->sp correctly requires some extra work due to the way
+	; Auto stack switch works
+	;  - U mode: retrieve it from AUX_USER_SP
+	;  - K mode: add the offset from current SP where H/w starts auto push
+	;
+	; Utilize the fact that Z bit is set if Intr taken in U mode
+	mov.nz	r9, sp
+	add.nz	r9, r9, SZ_PT_REGS - PT_sp - 4
+	bnz	1f
+
+	lr	r9, [AUX_USER_SP]
+1:
+	PUSH	r9	; SP
+
+	PUSH	fp
+	PUSH	gp
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	PUSH	r25			; user_r25
+	GET_CURR_TASK_ON_CPU	r25
+#else
+	sub	sp, sp, 4
+#endif
+
+.ifnc \called_from, exception
+	sub	sp, sp, 12	; BTA/ECR/orig_r0 placeholder per pt_regs
+.endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE	called_from
+
+.ifnc \called_from, exception
+	add	sp, sp, 12	; skip BTA/ECR/orig_r0 placeholders
+.endif
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	POP	r25
+#else
+	add	sp, sp, 4
+#endif
+
+	POP	gp
+	POP	fp
+
+	; Don't touch AUX_USER_SP if returning to K mode (Z bit set)
+	; (Z bit set on K mode is inverse of INTERRUPT_PROLOGUE)
+	add.z	sp, sp, 4
+	bz	1f
+
+	POPAX	AUX_USER_SP
+1:
+	POP	r12
+	POP	r30
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	; Before jumping to Exception Vector, hardware micro-ops did following:
+	;   1. SP auto-switched to kernel mode stack
+	;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1,K:0)
+	;
+	; Now manually save the complete reg file
+
+	PUSH	r9		; freeup a register: slot of erstatus
+
+	PUSHAX	eret
+	sub	sp, sp, 12	; skip JLI, LDI, EI
+	PUSH	lp_count
+	PUSHAX	lp_start
+	PUSHAX	lp_end
+	PUSH	blink
+
+	PUSH	r11
+	PUSH	r10
+
+	ld.as	r9,  [sp, 10]	; load stashed r9 (status32 stack slot)
+	lr	r10, [erstatus]
+	st.as	r10, [sp, 10]	; save status32 at its right stack slot
+
+	PUSH	r9
+	PUSH	r8
+	PUSH	r7
+	PUSH	r6
+	PUSH	r5
+	PUSH	r4
+	PUSH	r3
+	PUSH	r2
+	PUSH	r1
+	PUSH	r0
+
+	; -- for interrupts, regs above are auto-saved by h/w in that order --
+	; Now do what ISR prologue does (manually save r12, sp, fp, gp, r25)
+	;
+	; Set Z flag if this was from U mode (expected by INTERRUPT_PROLOGUE)
+	; Although H/w exception micro-ops do set Z flag for U mode (just like
+	; for interrupts), it could get clobbered in case we soft land here from
+	; a TLB Miss exception handler (tlbex.S)
+
+	and	r10, r10, STATUS_U_MASK
+	xor.f	0, r10, STATUS_U_MASK
+
+	INTERRUPT_PROLOGUE  exception
+
+	PUSHAX	erbta
+	PUSHAX	ecr		; r9 contains ECR, expected by EV_Trap
+
+	PUSH	r0		; orig_r0
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+	; Assumes r0 has PT_status32
+	btst   r0, STATUS_U_BIT	; Z flag set if K, used in INTERRUPT_EPILOGUE
+
+	add	sp, sp, 8	; orig_r0/ECR don't need restoring
+	POPAX	erbta
+
+	INTERRUPT_EPILOGUE  exception
+
+	POP	r0
+	POP	r1
+	POP	r2
+	POP	r3
+	POP	r4
+	POP	r5
+	POP	r6
+	POP	r7
+	POP	r8
+	POP	r9
+	POP	r10
+	POP	r11
+
+	POP	blink
+	POPAX	lp_end
+	POPAX	lp_start
+
+	POP	r9
+	mov	lp_count, r9
+
+	add	sp, sp, 12	; skip JLI, LDI, EI
+	POPAX	eret
+	POPAX	erstatus
+
+	ld.as	r9, [sp, -12]	; reload r9 which got clobbered
+.endm
+
+.macro FAKE_RET_FROM_EXCPN
+	lr      r9, [status32]
+	bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	or      r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+	kflag   r9
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+	bmskn \reg, sp, THREAD_SHIFT - 1
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+	lr  \reg, [identity]
+	xbfu \reg, \reg, 0xE8	/* 00111    01000 */
+				/* M = 8-1  N = 8 */
+.endm
+
+#endif
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
new file mode 100644
index 0000000..1aff3be
--- /dev/null
+++ b/arch/arc/include/asm/entry-compact.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *  Stack switching code can no longer rely on the fact that
+ *  if we are NOT in user mode, the stack has been switched to kernel mode,
+ *  e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ *  its prologue, including the stack switch from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ *   Normally the CPU does this automatically, however when doing a FAKE rtie,
+ *   we also need to do this explicitly. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ *  -Modified CALLEE_REG save/restore macros to handle the fact that
+ *      r25 contains the kernel current task ptr
+ *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *      address write-back load ld.ab instead of separate ld/add instructions
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_COMPACT_H
+#define __ASM_ARC_ENTRY_COMPACT_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-compact.h>
+#include <asm/thread_info.h>	/* For THREAD_SIZE */
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry   : r9 contains pre-IRQ/exception/trap status32
+ * Exit    : SP set to K mode stack
+ *           SP at the time of entry (K/U) saved @ pt_regs->sp
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+	/* User mode when this happened? Yes: proceed to switch the stack */
+	bbit1   r9, STATUS_U_BIT, 88f
+
+	/* OK we were already in kernel mode when this event happened, thus can
+	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
+	 */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+	/* However....
+	 * If Level 2 Interrupts are enabled, we may end up with a corner case:
+	 * 1. User task executing
+	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+	 * 3. But before it could switch SP from USER to KERNEL stack,
+	 *      an L2 IRQ "interrupts" L1
+	 * That way, although the L2 IRQ happened in kernel mode, the stack is
+	 * still not switched.
+	 * To handle this, we may need to switch the stack even if in kernel
+	 * mode, provided SP has a value in the USER mode stack range
+	 * ( < 0x7000_0000 )
+	 */
+	brlo sp, VMALLOC_START, 88f
+
+	/* TODO: vineetg:
+	 * We need to be a bit more cautious here. What if a kernel bug in the
+	 * L1 ISR caused SP to go haywire (some small value which looks like a
+	 * USER stack) and then we take the L2 ISR.
+	 * The brlo above alone would treat it as a valid L1-L2 scenario
+	 * instead of shouting aloud.
+	 * The only feasible way is to make sure this L2 happened in the
+	 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
+	 * L1 ISR before it switches stack
+	 */
+
+#endif
+
+    /*------Intr/Excp happened in kernel mode, SP already set up ------ */
+	/* save it nevertheless @ pt_regs->sp for uniformity */
+
+	b.d	66f
+	st	sp, [sp, PT_sp - SZ_PT_REGS]
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+	GET_CURR_TASK_ON_CPU   r9
+
+	/* With current tsk in r9, get its kernel mode stack base */
+	GET_TSK_STACK_BASE  r9, r9
+
+	/* save U mode SP @ pt_regs->sp */
+	st	sp, [r9, PT_sp - SZ_PT_REGS]
+
+	/* final SP switch */
+	mov	sp, r9
+66:
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN
+
+	lr	r9, [status32]
+	bclr	r9, r9, STATUS_AE_BIT
+	or	r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
+	sr	r9, [erstatus]
+	mov	r9, 55f
+	sr	r9, [eret]
+	rtie
+55:
+.endm
+
+/*--------------------------------------------------------------
+ * For the early Exception/ISR Prologue, a core reg is temporarily needed to
+ * code the rest of the prologue (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or the SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile this reg is restored, only to be saved
+ * again on the kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro PROLOG_FREEUP_REG	reg, mem
+#ifdef CONFIG_SMP
+	sr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	st  \reg, [\mem]
+#endif
+.endm
+
+.macro PROLOG_RESTORE_REG	reg, mem
+#ifdef CONFIG_SMP
+	lr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	ld  \reg, [\mem]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	/* Need at least 1 reg to code the early exception prologue */
+	PROLOG_FREEUP_REG r9, @ex_saved_reg1
+
+	/* U/K mode at time of exception (stack not switched if already K) */
+	lr  r9, [erstatus]
+
+	/* ARC700 doesn't provide auto-stack switching */
+	SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/* Treat r25 as scratch reg (save on stack) and load with "current" */
+	PUSH    r25
+	GET_CURR_TASK_ON_CPU   r25
+#else
+	sub     sp, sp, 4
+#endif
+
+	st.a	r0, [sp, -8]    /* orig_r0 needed for syscall (skip ECR slot) */
+	sub	sp, sp, 4	/* skip pt_regs->sp, already saved above */
+
+	/* Restore r9 used to code the early prologue */
+	PROLOG_RESTORE_REG  r9, @ex_saved_reg1
+
+	/* now we are ready to save the regfile */
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSHAX	eret
+	PUSHAX	erstatus
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	erbta
+
+	lr	r9, [ecr]
+	st      r9, [sp, PT_event]    /* EV_Trap expects r9 to have ECR */
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware, which is not good.
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+	POPAX	erbta
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
+
+	POPAX	erstatus
+	POPAX	eret
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
+
+	ld  sp, [sp] /* restore original sp */
+	/* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1		0x0031abcd
+#define event_IRQ2		0x0032abcd
+
+.macro INTERRUPT_PROLOGUE  LVL
+
+	/* free up r9 as scratchpad */
+	PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
+
+	/* Which mode (user/kernel) was the system in when the intr occurred */
+	lr  r9, [status32_l\LVL\()]
+
+	SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/* Treat r25 as scratch reg (save on stack) and load with "current" */
+	PUSH    r25
+	GET_CURR_TASK_ON_CPU   r25
+#else
+	sub     sp, sp, 4
+#endif
+
+	PUSH	0x003\LVL\()abcd    /* Dummy ECR */
+	sub	sp, sp, 8	    /* skip orig_r0 (not needed)
+				       skip pt_regs->sp, already saved above */
+
+	/* Restore r9 used to code the early prologue */
+	PROLOG_RESTORE_REG  r9, @int\LVL\()_saved_reg
+
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSH	ilink\LVL\()
+	PUSHAX	status32_l\LVL\()
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	bta_l\LVL\()
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware, which is not good.
+ *-------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE  LVL
+	POPAX	bta_l\LVL\()
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
+
+	POPAX	status32_l\LVL\()
+	POP	ilink\LVL\()
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
+
+	ld  sp, [sp] /* restore original sp */
+	/* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+	bic \reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+	lr  \reg, [identity]
+	lsr \reg, \reg, 8
+	bmsk \reg, \reg, 7
+.endm
+
+#endif  /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 0000000..51597f3
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#include <asm/unistd.h>		/* For NR_syscalls definition */
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>	/* For VMALLOC_START */
+#include <asm/mmu.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/entry-compact.h>	/* ISA specific bits */
+#else
+#include <asm/entry-arcv2.h>
+#endif
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a    reg1, [reg2, x]  => Pre Incr
+ *      Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab   reg1, [reg2, x]  => Post Incr
+ *      Eff Addr for load = [reg2]
+ */
+
+.macro PUSH reg
+	st.a	\reg, [sp, -4]
+.endm
+
+.macro PUSHAX aux
+	lr	r9, [\aux]
+	PUSH	r9
+.endm
+
+.macro POP reg
+	ld.ab	\reg, [sp, 4]
+.endm
+
+.macro POPAX aux
+	POP	r9
+	sr	r9, [\aux]
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore Scratch Regs:
+ * used by Interrupt/Exception Prologue/Epilogue
+ *-------------------------------------------------------------*/
+.macro  SAVE_R0_TO_R12
+	PUSH	r0
+	PUSH	r1
+	PUSH	r2
+	PUSH	r3
+	PUSH	r4
+	PUSH	r5
+	PUSH	r6
+	PUSH	r7
+	PUSH	r8
+	PUSH	r9
+	PUSH	r10
+	PUSH	r11
+	PUSH	r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+	POP	r12
+	POP	r11
+	POP	r10
+	POP	r9
+	POP	r8
+	POP	r7
+	POP	r6
+	POP	r5
+	POP	r4
+	POP	r3
+	POP	r2
+	POP	r1
+	POP	r0
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	ld	r25, [sp, 12]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore callee-saved regs:
+ * used by several macros below
+ *-------------------------------------------------------------*/
+.macro SAVE_R13_TO_R24
+	PUSH	r13
+	PUSH	r14
+	PUSH	r15
+	PUSH	r16
+	PUSH	r17
+	PUSH	r18
+	PUSH	r19
+	PUSH	r20
+	PUSH	r21
+	PUSH	r22
+	PUSH	r23
+	PUSH	r24
+.endm
+
+.macro RESTORE_R24_TO_R13
+	POP	r24
+	POP	r23
+	POP	r22
+	POP	r21
+	POP	r20
+	POP	r19
+	POP	r18
+	POP	r17
+	POP	r16
+	POP	r15
+	POP	r14
+	POP	r13
+.endm
+
+/*--------------------------------------------------------------
+ * Collect User Mode callee regs as struct callee_regs - needed by
+ * fork/do_signal/unaligned-access-emulation.
+ * (By default only scratch regs are saved on entry to kernel)
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * It would have been saved in task->thread.user_r25 already, but to keep
+ * the interface same it is copied into regular r25 placeholder in
+ * struct callee_regs.
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+
+	mov	r12, sp		; save SP as ref to pt_regs
+	SAVE_R13_TO_R24
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	; Retrieve orig r25 and save it with rest of callee_regs
+	ld	r12, [r12, PT_user_r25]
+	PUSH	r12
+#else
+	PUSH	r25
+#endif
+
+.endm
+
+/*--------------------------------------------------------------
+ * Save kernel mode callee regs at the time of a context switch.
+ *
+ * Special handling for r25 if used for caching the Task Pointer.
+ * Kernel simply skips saving it since it will be loaded with the
+ * incoming task pointer anyway
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+
+	SAVE_R13_TO_R24
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	sub     sp, sp, 4
+#else
+	PUSH	r25
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Opposite of SAVE_CALLEE_SAVED_KERNEL
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	add     sp, sp, 4  /* skip usual r25 placeholder */
+#else
+	POP	r25
+#endif
+	RESTORE_R24_TO_R13
+.endm
+
+/*--------------------------------------------------------------
+ * Opposite of SAVE_CALLEE_SAVED_USER
+ *
+ * A ptrace tracer or the unaligned-access fixup might have changed a user mode
+ * callee reg, which is saved back to the usual r25 storage location
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	POP	r12
+#else
+	POP	r25
+#endif
+	RESTORE_R24_TO_R13
+
+	; SP is back to start of pt_regs
+#ifdef CONFIG_ARC_CURR_IN_REG
+	st	r12, [sp, PT_user_r25]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+	add     sp, sp, SZ_CALLEE_REGS
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE: thread_info sits at its start and the
+ * stack starts at the end of the page, growing downwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+	/* Get task->thread_info (this is essentially start of a PAGE) */
+	ld  \out, [\tsk, TASK_THREAD_INFO]
+
+	/* Go to the end of the page, where the stack begins (and grows downwards) */
+	add2 \out, \out, (THREAD_SIZE)/4
+
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS  reg
+	GET_CURR_THR_INFO_FROM_SP  \reg
+	ld  \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro  GET_CURR_TASK_ON_CPU   reg
+	GET_CPU_ID  \reg
+	ld.as  \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+	GET_CPU_ID  \tmp
+	add2 \tmp, @_current_task, \tmp
+	st   \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	mov r25, \tsk
+#endif
+
+.endm
+
+
+#else   /* Uniprocessor implementation of macros */
+
+.macro  GET_CURR_TASK_ON_CPU    reg
+	ld  \reg, [@_current_task]
+.endm
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+	st  \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ *  -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+	add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+	GET_CURR_TASK_ON_CPU  \reg
+	add \reg, \reg, \off
+.endm
+
+#endif	/* CONFIG_ARC_CURR_IN_REG */
+
+#endif  /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 0000000..28abc69
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
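
A worked example of the macro above (the value is arbitrary): arch_align_stack() simply masks off the low 4 bits, rounding the stack pointer down to a 16-byte boundary.

	unsigned long sp = 0x5ffffff9;
	unsigned long aligned = arch_align_stack(sp);	/* 0x5ffffff0 */
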
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 0000000..11e1b1f
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+							\
+	smp_mb();					\
+	__asm__ __volatile__(				\
+	"1:	llock	%1, [%2]		\n"	\
+		insn				"\n"	\
+	"2:	scond	%0, [%2]		\n"	\
+	"	bnz	1b			\n"	\
+	"	mov %0, 0			\n"	\
+	"3:					\n"	\
+	"	.section .fixup,\"ax\"		\n"	\
+	"	.align  4			\n"	\
+	"4:	mov %0, %4			\n"	\
+	"	j   3b				\n"	\
+	"	.previous			\n"	\
+	"	.section __ex_table,\"a\"	\n"	\
+	"	.align  4			\n"	\
+	"	.word   1b, 4b			\n"	\
+	"	.word   2b, 4b			\n"	\
+	"	.previous			\n"	\
+							\
+	: "=&r" (ret), "=&r" (oldval)			\
+	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
+	: "cc", "memory");				\
+	smp_mb()					\
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+							\
+	smp_mb();					\
+	__asm__ __volatile__(				\
+	"1:	ld	%1, [%2]		\n"	\
+		insn				"\n"	\
+	"2:	st	%0, [%2]		\n"	\
+	"	mov %0, 0			\n"	\
+	"3:					\n"	\
+	"	.section .fixup,\"ax\"		\n"	\
+	"	.align  4			\n"	\
+	"4:	mov %0, %4			\n"	\
+	"	j   3b				\n"	\
+	"	.previous			\n"	\
+	"	.section __ex_table,\"a\"	\n"	\
+	"	.align  4			\n"	\
+	"	.word   1b, 4b			\n"	\
+	"	.word   2b, 4b			\n"	\
+	"	.previous			\n"	\
+							\
+	: "=&r" (ret), "=&r" (oldval)			\
+	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
+	: "cc", "memory");				\
+	smp_mb()					\
+
+#endif
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
+#endif
+	pagefault_disable();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		/* oldval = *uaddr; *uaddr = oldval + oparg */
+		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("or  %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_enable();
+#endif
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ:
+			ret = (oldval == cmparg);
+			break;
+		case FUTEX_OP_CMP_NE:
+			ret = (oldval != cmparg);
+			break;
+		case FUTEX_OP_CMP_LT:
+			ret = (oldval < cmparg);
+			break;
+		case FUTEX_OP_CMP_GE:
+			ret = (oldval >= cmparg);
+			break;
+		case FUTEX_OP_CMP_LE:
+			ret = (oldval <= cmparg);
+			break;
+		case FUTEX_OP_CMP_GT:
+			ret = (oldval > cmparg);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
+ * Return 0 for success, -EFAULT otherwise
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval,
+			      u32 newval)
+{
+	int ret = 0;
+	u32 existval;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
+#endif
+	smp_mb();
+
+	__asm__ __volatile__(
+#ifdef CONFIG_ARC_HAS_LLSC
+	"1:	llock	%1, [%4]		\n"
+	"	brne	%1, %2, 3f		\n"
+	"2:	scond	%3, [%4]		\n"
+	"	bnz	1b			\n"
+#else
+	"1:	ld	%1, [%4]		\n"
+	"	brne	%1, %2, 3f		\n"
+	"2:	st	%3, [%4]		\n"
+#endif
+	"3:	\n"
+	"	.section .fixup,\"ax\"	\n"
+	"4:	mov %0, %5	\n"
+	"	j   3b	\n"
+	"	.previous	\n"
+	"	.section __ex_table,\"a\"	\n"
+	"	.align  4	\n"
+	"	.word   1b, 4b	\n"
+	"	.word   2b, 4b	\n"
+	"	.previous\n"
+	: "+&r"(ret), "=&r"(existval)
+	: "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+	: "cc", "memory");
+
+	smp_mb();
+
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_enable();
+#endif
+	*uval = existval;
+	return ret;
+}
+
+#endif
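
A sketch of how futex_atomic_op_inuser() above unpacks encoded_op, assuming the usual FUTEX_OP() packing of (op << 28) | (cmp << 24) | (oparg << 12) | cmparg; the field values below are hypothetical:

	int encoded_op = (1 << 28) | (5 << 24) | (0xFFF << 12) | 0x001;

	int op     = (encoded_op >> 28) & 7;	/* 1 (FUTEX_OP_ADD)          */
	int cmp    = (encoded_op >> 24) & 15;	/* 5                         */
	int oparg  = (encoded_op << 8) >> 20;	/* 0xFFF sign-extends to -1  */
	int cmparg = (encoded_op << 20) >> 20;	/* 1                         */

The shift-left/shift-right pairs sign-extend the 12-bit oparg and cmparg fields, which is why 0xFFF decodes as -1.
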
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 0000000..b1585c9
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_types.h>
+
+/* start after vmalloc area */
+#define FIXMAP_BASE		(PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+#define FIXMAP_SIZE		PGDIR_SIZE	/* only 1 PGD worth */
+#define KM_TYPE_NR		((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+#define FIXMAP_ADDR(nr)		(FIXMAP_BASE + ((nr) << PAGE_SHIFT))
+
+/* start after fixmap area */
+#define PKMAP_BASE		(FIXMAP_BASE + FIXMAP_SIZE)
+#define PKMAP_SIZE		PGDIR_SIZE
+#define LAST_PKMAP		(PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#define kmap_prot		PAGE_KERNEL
+
+
+#include <asm/cacheflush.h>
+
+extern void *kmap(struct page *page);
+extern void *kmap_high(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+static inline void flush_cache_kmaps(void)
+{
+	flush_cache_all();
+}
+
+static inline void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+
+#endif
+
+#endif
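
A minimal sketch of the kmap()/kunmap() pairing declared above; the helper name and the way 'page' is obtained are hypothetical:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);	/* maps a highmem page, may sleep */

	memcpy(dst, vaddr, len);
	kunmap(page);			/* no-op for non-highmem pages */
}
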
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 0000000..c5094de
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+	return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+#define pmd_special(pmd)	pte_special(pmd_pte(pmd))
+
+#define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))
+
+#define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)
+#define pmd_trans_splitting(pmd)	(pmd_trans_huge(pmd) && pmd_special(pmd))
+
+#define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+        /*
+         * open-coded pte_modify() with additional retaining of HW_SZ bit
+         * so that pmd_trans_huge() remains true for this PMD
+         */
+        return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+				 pmd_t *pmd);
+
+#define has_transparent_hugepage() 1
+
+/* Generic variants assume pgtable_t is struct page *, hence need for these */
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				       pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end);
+
+#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 0000000..cb69299
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+				  unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
+#define ioremap_wc(phy, sz)		ioremap(phy, sz)
+#define ioremap_wt(phy, sz)		ioremap(phy, sz)
+
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
+/* Change struct page to physical address */
+#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+	u8 b;
+
+	__asm__ __volatile__(
+	"	ldb%U1 %0, %1	\n"
+	: "=r" (b)
+	: "m" (*(volatile u8 __force *)addr)
+	: "memory");
+
+	return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+	u16 s;
+
+	__asm__ __volatile__(
+	"	ldw%U1 %0, %1	\n"
+	: "=r" (s)
+	: "m" (*(volatile u16 __force *)addr)
+	: "memory");
+
+	return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+	u32 w;
+
+	__asm__ __volatile__(
+	"	ld%U1 %0, %1	\n"
+	: "=r" (w)
+	: "m" (*(volatile u32 __force *)addr)
+	: "memory");
+
+	return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	stb%U1 %0, %1	\n"
+	:
+	: "r" (b), "m" (*(volatile u8 __force *)addr)
+	: "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	stw%U1 %0, %1	\n"
+	:
+	: "r" (s), "m" (*(volatile u16 __force *)addr)
+	: "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	st%U1 %0, %1	\n"
+	:
+	: "r" (w), "m" (*(volatile u32 __force *)addr)
+	: "memory");
+
+}
+
+/*
+ * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+ * Based on ARM model for the typical use case
+ *
+ *	<ST [DMA buffer]>
+ *	<writel MMIO "go" reg>
+ *  or:
+ *	<readl MMIO "status" reg>
+ *	<LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+
+/*
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+ */
+#define readb_relaxed(c)	__raw_readb(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c)	__raw_writeb(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
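
A sketch of the "fill DMA descriptor, then ring the doorbell" pattern that the barrier comment above refers to; the register offset and descriptor layout are invented for illustration:

#define MYDEV_GO_REG	0x14		/* hypothetical doorbell register */

struct mydev_desc {
	u32 addr;
	u32 len;
};

static void mydev_kick(void __iomem *regs, struct mydev_desc *d,
		       u32 buf_pa, u32 len)
{
	d->addr = buf_pa;		/* plain stores to the DMA descriptor */
	d->len  = len;

	/* writel() = __iowmb() + store, so the descriptor stores above are
	 * ordered before the MMIO doorbell write */
	writel(1, regs + MYDEV_GO_REG);
}
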
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 0000000..4fd7d62
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_CPU_IRQS	32  /* number of interrupt lines of ARC770 CPU */
+#define NR_IRQS		128 /* allow some CPU external IRQ handling */
+
+/* Platform Independent IRQs */
+#ifdef CONFIG_ISA_ARCOMPACT
+#define TIMER0_IRQ      3
+#define TIMER1_IRQ      4
+#define IPI_IRQ		(NR_CPU_IRQS-1) /* dummy to enable SMP build for UP hardware */
+#else
+#define TIMER0_IRQ      16
+#define TIMER1_IRQ      17
+#define IPI_IRQ         19
+#endif
+
+#include <linux/interrupt.h>
+#include <asm-generic/irq.h>
+
+extern void arc_init_IRQ(void);
+void arc_local_timer_setup(void);
+void arc_request_percpu_irq(int irq, int cpu,
+                            irqreturn_t (*isr)(int irq, void *dev),
+                            const char *irq_nm, void *percpu_dev);
+
+#endif
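
A sketch of registering a per-cpu interrupt through the helper declared above; the ISR and cookie names are hypothetical:

static irqreturn_t my_timer_isr(int irq, void *dev)
{
	/* ack the hardware, run the per-cpu event handler */
	return IRQ_HANDLED;
}

static void my_timer_setup(void *percpu_dev)
{
	arc_request_percpu_irq(TIMER0_IRQ, smp_processor_id(),
			       my_timer_isr, "my-timer", percpu_dev);
}
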
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
new file mode 100644
index 0000000..68b6092
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCV2_H
+#define __ASM_IRQFLAGS_ARCV2_H
+
+#include <asm/arcregs.h>
+
+/* status32 Bits */
+#define STATUS_AD_BIT	19   /* Disable Align chk: core supports non-aligned */
+#define STATUS_IE_BIT	31
+
+#define STATUS_AD_MASK		(1<<STATUS_AD_BIT)
+#define STATUS_IE_MASK		(1<<STATUS_IE_BIT)
+
+#define AUX_USER_SP		0x00D
+#define AUX_IRQ_CTRL		0x00E
+#define AUX_IRQ_ACT		0x043	/* Active Intr across all levels */
+#define AUX_IRQ_LVL_PEND	0x200	/* Pending Intr across all levels */
+#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+#define AUX_IRQ_PRIORITY	0x206
+#define ICAUSE			0x40a
+#define AUX_IRQ_SELECT		0x40b
+#define AUX_IRQ_ENABLE		0x40c
+
+/* Was Intr taken in User Mode */
+#define AUX_IRQ_ACT_BIT_U	31
+
+/* 0 is highest level, but taken by FIRQs, if present in design */
+#define ARCV2_IRQ_DEF_PRIO		0
+
+/* seed value for status register */
+#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
+					(ARCV2_IRQ_DEF_PRIO << 1))
+
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG		(0x10 | ARCV2_IRQ_DEF_PRIO)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__("	clri %0	\n" : "=r" (flags) : : "memory");
+
+	return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	__asm__ __volatile__("	seti %0	\n" : : "r" (flags) : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+static inline void arch_local_irq_enable(void)
+{
+	unsigned int irqact = read_aux_reg(AUX_IRQ_ACT);
+
+	if (irqact & 0xffff)
+		write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff);
+
+	__asm__ __volatile__("	seti	\n" : : : "memory");
+}
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+	__asm__ __volatile__("	clri	\n" : : : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr  %0, [status32]	\n"
+	: "=&r"(temp)
+	:
+	: "memory");
+
+	return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (STATUS_IE_MASK));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arc_softirq_trigger(int irq)
+{
+	write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+	write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
+#else
+
+.macro IRQ_DISABLE  scratch
+	clri
+.endm
+
+.macro IRQ_ENABLE  scratch
+	seti
+.endm
+
+#endif	/* __ASSEMBLY__ */
+
+#endif
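
A sketch of the save/disable then restore pattern these helpers implement; kernel code would normally go through the generic local_irq_save()/local_irq_restore() wrappers rather than calling the arch_ variants directly:

	unsigned long flags;

	flags = arch_local_irq_save();	/* CLRI: disable and return old state */
	/* ... short critical section ... */
	arch_local_irq_restore(flags);	/* SETI: re-enable per the saved state */
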
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
new file mode 100644
index 0000000..4c6eed8
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCOMPACT_H
+#define __ASM_IRQFLAGS_ARCOMPACT_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ *  -Remove explicit mov of current status32 into reg, that is not needed
+ *  -Use BIC  insn instead of INVERTED + AND
+ *  -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#include <asm/arcregs.h>
+
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT		1	/* Int 1 enable */
+#define STATUS_E2_BIT		2	/* Int 2 enable */
+#define STATUS_A1_BIT		3	/* Int 1 active */
+#define STATUS_A2_BIT		4	/* Int 2 active */
+#define STATUS_AE_BIT		5	/* Exception active */
+
+#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
+#define STATUS_IE_MASK		(STATUS_E1_MASK | STATUS_E2_MASK)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+#define AUX_IRQ_LV12		0x43	/* interrupt level register */
+
+#define AUX_IENABLE		0x40c
+#define AUX_ITRIGGER		0x40d
+#define AUX_IPULSE		0x415
+
+#define ISA_INIT_STATUS_BITS	STATUS_IE_MASK
+
+#define ISA_SLEEP_ARG		0x3
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ *
+ * All of them have a "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse a stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ *	Orig Bug + Rejected solution	: https://lkml.org/lkml/2013/3/29/67
+ *	Reasoning			: https://lkml.org/lkml/2013/4/8/15
+ *
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+	unsigned long temp, flags;
+
+	__asm__ __volatile__(
+	"	lr  %1, [status32]	\n"
+	"	bic %0, %1, %2		\n"
+	"	and.f 0, %1, %2	\n"
+	"	flag.nz %0		\n"
+	: "=r"(temp), "=r"(flags)
+	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+	: "memory", "cc");
+
+	return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+	__asm__ __volatile__(
+	"	flag %0			\n"
+	:
+	: "r"(flags)
+	: "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+static inline void arch_local_irq_enable(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr   %0, [status32]	\n"
+	"	or   %0, %0, %1		\n"
+	"	flag %0			\n"
+	: "=&r"(temp)
+	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+	: "cc", "memory");
+}
+
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr  %0, [status32]	\n"
+	"	and %0, %0, %1		\n"
+	"	flag %0			\n"
+	: "=&r"(temp)
+	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+	: "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr  %0, [status32]	\n"
+	: "=&r"(temp)
+	:
+	: "memory");
+
+	return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+			| STATUS_E2_MASK
+#endif
+		));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+	bl	trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+	bl	trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE  scratch
+	lr	\scratch, [status32]
+	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+	flag	\scratch
+	TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE  scratch
+	TRACE_ASM_IRQ_ENABLE
+	lr	\scratch, [status32]
+	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+	flag	\scratch
+.endm
+
+#endif	/* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 0000000..59bc6a6
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/irqflags-compact.h>
+#else
+#include <asm/irqflags-arcv2.h>
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 0000000..3fbe6c4
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+	DIE_UNUSED,
+	DIE_TRAP,
+	DIE_IERR,
+	DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 0000000..fea9316
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,63 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/ptrace.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS		87
+
+#define BREAK_INSTR_SIZE	2
+#define CACHE_FLUSH_IS_SAFE	1
+#define NUMREGBYTES		(GDB_MAX_REGS * 4)
+#define BUFMAX			2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	__asm__ __volatile__ ("trap_s	0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs);
+
+/* This is the numbering of registers according to the GDB. See GDB's
+ * arc-tdep.h for details.
+ *
+ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
+enum arc_linux_regnums {
+	_R0		= 0,
+	_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+	_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+	_R25, _R26,
+	_FP		= 27,
+	__SP		= 28,
+	_R30		= 30,
+	_BLINK		= 31,
+	_LP_COUNT	= 60,
+	_STOP_PC	= 64,
+	_RET		= 64,
+	_LP_START	= 65,
+	_LP_END		= 66,
+	_STATUS32	= 67,
+	_ECR		= 76,
+	_BTA		= 82,
+};
+
+#else
+#define kgdb_trap(regs)
+#endif
+
+#endif	/* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
new file mode 100644
index 0000000..f0d7f6a
--- /dev/null
+++ b/arch/arc/include/asm/kmap_types.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/*
+ * We primarily need to define KM_TYPE_NR here but that in turn
+ * is a function of PGDIR_SIZE etc.
+ * To avoid circular deps issue, put everything in asm/highmem.h
+ */
+#endif
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 0000000..944dbed
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE   8
+#define MAX_STACK_SIZE  64
+
+struct arch_specific_insn {
+	int is_short;
+	kprobe_opcode_t *t1_addr, *t2_addr;
+	kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p)  do {  } while (0)
+
+#define kretprobe_blacklist_size    0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+			     unsigned long val, void *data);
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+struct kprobe_ctlblk {
+	unsigned int kprobe_status;
+	struct pt_regs jprobe_saved_regs;
+	char jprobes_stack[MAX_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 0000000..5faad17
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+#define ASM_NL		 `	/* use '`' to mark new line in macro */
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+	.section .data.arcfp
+#else
+	.section .data
+#endif
+	.global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+	.section .text.arcfp, "ax",@progbits
+#else
+	.section .text, "ax",@progbits
+#endif
+.endm
+
+#else	/* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 0000000..c28e6c3
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ *	Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
+ *	a multi-platform kernel builds with an array of such descriptors.
+ *	We extend the early DT scan to also match the DT's "compatible" string
+ *	against the @dt_compat of all such descriptors, and the one with the
+ *	highest "DT score" is selected as the global @machine_desc.
+ *
+ * @name:		Board/SoC name
+ * @dt_compat:		Array of device tree 'compatible' strings
+ * 			(XXX: although only 1st entry is looked at)
+ * @init_early:		Very early callback [called from setup_arch()]
+ * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
+ * 			[(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_machine:	arch initcall level callback (e.g. populate static
+ * 			platform devices or parse Devicetree)
+ * @init_late:		Late initcall level callback
+ *
+ */
+struct machine_desc {
+	const char		*name;
+	const char		**dt_compat;
+	void			(*init_early)(void);
+#ifdef CONFIG_SMP
+	void			(*init_per_cpu)(unsigned int);
+#endif
+	void			(*init_machine)(void);
+	void			(*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern const struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name)			\
+static const struct machine_desc __mach_desc_##_type	\
+__used							\
+__attribute__((__section__(".arch.info.init"))) = {	\
+	.name		= _name,
+
+#define MACHINE_END				\
+};
+
+extern const struct machine_desc *setup_machine_fdt(void *dt);
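+/*
+ * Illustrative sketch (not part of the original header): a board port would
+ * typically register itself as below, using a hypothetical
+ * "snps,example-board" compatible string and example_init() callback.
+ *
+ *	static const char *example_compat[] = { "snps,example-board", NULL };
+ *
+ *	MACHINE_START(EXAMPLE, "example-board")
+ *		.dt_compat	= example_compat,
+ *		.init_machine	= example_init,
+ *	MACHINE_END
+ */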
+
+#endif
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
new file mode 100644
index 0000000..46f4e53
--- /dev/null
+++ b/arch/arc/include/asm/mcip.h
@@ -0,0 +1,91 @@
+/*
+ * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MCIP_H
+#define __ASM_MCIP_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+#include <asm/arcregs.h>
+
+#define ARC_REG_MCIP_BCR	0x0d0
+#define ARC_REG_MCIP_CMD	0x600
+#define ARC_REG_MCIP_WDATA	0x601
+#define ARC_REG_MCIP_READBACK	0x602
+
+struct mcip_cmd {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:8, param:16, cmd:8;
+#else
+	unsigned int cmd:8, param:16, pad:8;
+#endif
+
+#define CMD_INTRPT_GENERATE_IRQ		0x01
+#define CMD_INTRPT_GENERATE_ACK		0x02
+#define CMD_INTRPT_READ_STATUS		0x03
+#define CMD_INTRPT_CHECK_SOURCE		0x04
+
+/* Semaphore Commands */
+#define CMD_SEMA_CLAIM_AND_READ		0x11
+#define CMD_SEMA_RELEASE		0x12
+
+#define CMD_DEBUG_SET_MASK		0x34
+#define CMD_DEBUG_SET_SELECT		0x36
+
+#define CMD_GRTC_READ_LO		0x42
+#define CMD_GRTC_READ_HI		0x43
+
+#define CMD_IDU_ENABLE			0x71
+#define CMD_IDU_DISABLE			0x72
+#define CMD_IDU_SET_MODE		0x74
+#define CMD_IDU_SET_DEST		0x76
+#define CMD_IDU_SET_MASK		0x7C
+
+#define IDU_M_TRIG_LEVEL		0x0
+#define IDU_M_TRIG_EDGE			0x1
+
+#define IDU_M_DISTRI_RR			0x0
+#define IDU_M_DISTRI_DEST		0x2
+};
+
+/*
+ * MCIP programming model
+ *
+ * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
+ *   (param could be irq, common_irq, core_id ...)
+ * - More involved commands setup MCIP_WDATA with cmd specific data
+ *   before invoking the simple command
+ */
+static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
+{
+	struct mcip_cmd buf;
+
+	buf.pad = 0;
+	buf.cmd = cmd;
+	buf.param = param;
+
+	WRITE_AUX(ARC_REG_MCIP_CMD, buf);
+}
+
+/*
+ * Setup additional data for a cmd
+ * Callers need to lock to ensure atomicity
+ */
+static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
+				   unsigned int data)
+{
+	write_aux_reg(ARC_REG_MCIP_WDATA, data);
+
+	__mcip_cmd(cmd, param);
+}
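+
+/*
+ * Illustrative sketch (not part of the original header): a "simple" command
+ * needs no WDATA setup, e.g. raising a cross-core interrupt, where the
+ * 16-bit param field is assumed to carry the target cpu id. Callers provide
+ * their own locking, as noted above.
+ */
+static inline void __mcip_ipi_send_sketch(int cpu)
+{
+	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
+}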
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 0000000..b144d7c
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#elif defined(CONFIG_ARC_MMU_V4)
+#define CONFIG_ARC_MMU_VER 4
+#endif
+
+/* MMU Management regs */
+#define ARC_REG_MMU_BCR		0x06f
+#if (CONFIG_ARC_MMU_VER < 4)
+#define ARC_REG_TLBPD0		0x405
+#define ARC_REG_TLBPD1		0x406
+#define ARC_REG_TLBPD1HI	0	/* Dummy: allows code sharing with ARC700 */
+#define ARC_REG_TLBINDEX	0x407
+#define ARC_REG_TLBCOMMAND	0x408
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+#else
+#define ARC_REG_TLBPD0		0x460
+#define ARC_REG_TLBPD1		0x461
+#define ARC_REG_TLBPD1HI	0x463
+#define ARC_REG_TLBINDEX	0x464
+#define ARC_REG_TLBCOMMAND	0x465
+#define ARC_REG_PID		0x468
+#define ARC_REG_SCRATCH_DATA0	0x46c
+#endif
+
+/* Bits in MMU PID register */
+#define __TLB_ENABLE		(1 << 31)
+#define __PROG_ENABLE		(1 << 30)
+#define MMU_ENABLE		(__TLB_ENABLE | __PROG_ENABLE)
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR		0x80000000
+
+#if (CONFIG_ARC_MMU_VER < 4)
+#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x00000001)
+#else
+#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x40000000)
+#endif
+
+/* TLB Commands */
+#define TLBWrite    0x1
+#define TLBRead     0x2
+#define TLBGetIndex 0x3
+#define TLBProbe    0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI  0x5		/* write JTLB without inv uTLBs */
+#define TLBIVUTLB   0x6		/* explicitly inv uTLBs */
+#endif
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define TLBInsertEntry	0x7
+#define TLBDeleteEntry	0x8
+#endif
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
+} mm_context_t;
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void read_decode_mmu_bcr(void);
+
+static inline int is_pae40_enabled(void)
+{
+	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 0000000..1fd467e
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Refactored get_new_mmu_context( ) to only handle live-mm.
+ *   retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/*		ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cpu.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
+ */
+
+#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL
+
+#define asid_mm(mm, cpu)	mm->context.asid[cpu]
+#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
+
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
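+
+/*
+ * Worked example: if asid_cpu(cpu) is 0x1ff, the h/w PID is 0xff (last of
+ * the cycle); the next allocation increments it to 0x200, i.e. the 8-bit PID
+ * wraps to 0 while the upper 24 generation bits advance, which is the
+ * rollover handled in get_new_mmu_context() below.
+ */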
+
+/*
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+	const unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly same.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 * 	 generation, e.g. local_flush_tlb_mm() for forking  parent,
+	 * 	 first need to destroy the context, setting it to invalid
+	 * 	 value.
+	 */
+	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+		goto set_hw;
+
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
+
+		local_flush_tlb_all();
+
+		/*
+		 * The check above is for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non-zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cpu(cpu))
+			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
+	}
+
+	/* Assign new ASID to tsk */
+	asid_mm(mm, cpu) = asid_cpu(cpu);
+
+set_hw:
+	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	int i;
+
+	for_each_possible_cpu(i)
+		asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
+	return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+	local_irq_save(flags);
+	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+	local_irq_restore(flags);
+}
+
+/* Prepare the MMU for task: setup PID reg with allocated ASID
+ * If task doesn't have an ASID (never alloc'ed or stolen), get a new ASID
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	const int cpu = smp_processor_id();
+
+	/*
+	 * Note that the mm_cpumask is "aggregating" only, we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
+	 * the IPI to CPUs where a task once ran could cause stale TLB entry
+	 * re-use, especially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      For a non-aggregating mm_cpumask, IPI is not sent to C1, and if T1
+	 *      were to re-migrate to C1, it could access the unmapped region
+	 *      via any existing stale TLB entries.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+#ifndef CONFIG_SMP
+	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+	get_new_mmu_context(next);
+}
+
+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in the latter, it moves to a new ASID
+ * only if it was unallocated
+ */
+#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for retiring-mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm)   do { } while (0)
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 0000000..518222b
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+	void *unw_info;
+	int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 0000000..a2f88ff
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 0000000..429957f
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)	free_page(addr)
+
+#define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+	unsigned long pte;
+} pte_t;
+typedef struct {
+	unsigned long pgd;
+} pgd_t;
+typedef struct {
+	unsigned long pgprot;
+} pgprot_t;
+
+#define pte_val(x)      ((x).pte)
+#define pgd_val(x)      ((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+#ifdef CONFIG_ARC_HAS_PAE40
+typedef unsigned long long pte_t;
+#else
+typedef unsigned long pte_t;
+#endif
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+#define __pte(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+#define pte_pgprot(x)	(x)
+
+#endif
+
+typedef pte_t * pgtable_t;
+
+#define ARCH_PFN_OFFSET     (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)      (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * 128th page, and virt_to_page( ) will return the struct page corresp to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong had the kernel not been linked at 0x8000_0000
+ */
+#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __va(paddr)  ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr)	\
+	(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for stack/heaps pages (Non Executable) */
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+
+#define WANT_PAGE_VIRTUAL   1
+
+#include <asm-generic/memory_model.h>   /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 0000000..5f07176
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,228 @@
+/*
+ * Linux performance counter support for ARC
+ *
+ * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+/* Max number of counters that PCT block may ever have */
+#define ARC_PERF_MAX_COUNTERS	32
+
+#define ARC_REG_CC_BUILD	0xF6
+#define ARC_REG_CC_INDEX	0x240
+#define ARC_REG_CC_NAME0	0x241
+#define ARC_REG_CC_NAME1	0x242
+
+#define ARC_REG_PCT_BUILD	0xF5
+#define ARC_REG_PCT_COUNTL	0x250
+#define ARC_REG_PCT_COUNTH	0x251
+#define ARC_REG_PCT_SNAPL	0x252
+#define ARC_REG_PCT_SNAPH	0x253
+#define ARC_REG_PCT_CONFIG	0x254
+#define ARC_REG_PCT_CONTROL	0x255
+#define ARC_REG_PCT_INDEX	0x256
+#define ARC_REG_PCT_INT_CNTL	0x25C
+#define ARC_REG_PCT_INT_CNTH	0x25D
+#define ARC_REG_PCT_INT_CTRL	0x25E
+#define ARC_REG_PCT_INT_ACT	0x25F
+
+#define ARC_REG_PCT_CONFIG_USER	(1 << 18)	/* count in user mode */
+#define ARC_REG_PCT_CONFIG_KERN	(1 << 19)	/* count in kernel mode */
+
+#define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
+#define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
+#else
+	unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int c:16, r:8, v:8;
+#else
+	unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_LDC	(PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC	(PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 8)
+
+/*
+ * Some ARC pct quirks:
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ *	STALLED_CYCLES_FRONTEND.
+ *
+ *	We could start multiple performance counters and combine everything
+ *	afterwards, but that makes it complicated.
+ *
+ *	Note that I$ cache misses aren't counted by either of the two!
+ */
+
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build)
+ * Below is the static map between perf generic/arc specific event_id and
+ * h/w condition names.
+ * At the time of probe, we loop thru each index and find its name to
+ * complete the mapping of perf event_id to h/w index, as the latter is
+ * needed to actually program the counter
+ */
+static const char * const arc_pmu_ev_hw_map[] = {
+	/* count cycles */
+	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_BUS_CYCLES] = "crun",
+
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+
+	/* counts condition */
+	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
+#ifdef CONFIG_ISA_ARCV2
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+#else
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+#endif
+	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
+	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */
+
+	[PERF_COUNT_ARC_DCLM] = "dclm",		/* D-cache Load Miss */
+	[PERF_COUNT_ARC_DCSM] = "dcsm",		/* D-cache Store Miss */
+	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
+	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
+	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */
+};
+
+#define C(_x)			PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED	0xffff
+
+static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_STC,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_INSTRUCTIONS,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB,
+		},
+			/* DTLB LD/ST Miss not segregated by h/w */
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 0000000..86ed671
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ *  -"/proc/meminfo | grep PageTables" kept on increasing
+ *   Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ *  -Variable pg-sz means that Page Tables could be variable sized themselves
+ *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ *  -Page Table size capped to max 1 page to save memory - hence verified.
+ *  -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ *  -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ *  -Switched pgtable_t from being struct page * to unsigned long
+ *      =Needed so that Page Table allocator (pte_alloc_one) is not forced
+ *       to deal with struct page. That way in future we can make it allocate
+ *       multiple PG Tbls in one Page Frame
+ *      =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *       pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+	pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+	pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+}
+
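+/*
+ * The fresh PGD below gets its user entries (plus the user/kernel gutter)
+ * zeroed, the vmalloc-range entries copied from swapper_pg_dir so kernel
+ * mappings remain visible, and any remaining entries zeroed.
+ */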
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	int num, num2;
+	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+	if (ret) {
+		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+		memzero(ret, num * sizeof(pgd_t));
+
+		num2 = VMALLOC_SIZE / PGDIR_SIZE;
+		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+		memzero(ret + num + num2,
+			       (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+	}
+	return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+	return get_order(PTRS_PER_PTE * sizeof(pte_t));
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					unsigned long address)
+{
+	pte_t *pte;
+
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+					 __get_order_pte());
+
+	return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pgtable_t pte_pg;
+	struct page *page;
+
+	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+	if (!pte_pg)
+		return 0;
+	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
+	page = virt_to_page(pte_pg);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return 0;
+	}
+
+	return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+	pgtable_page_dtor(virt_to_page(ptep));
+	free_pages((unsigned long)ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache()   do { } while (0)
+#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
+
+#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 0000000..e5fec32
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ *     They are semantically the same although in different contexts:
+ *     VALID marks that a TLB entry exists, which can only happen if PRESENT
+ *  - Utilise some unused free bits to confine PTE flags to 12 bits
+ *     This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ *  -TLB Locking never really existed, except for initial specs
+ *  -SILENT_xxx not needed for our port
+ *  -Per my request, MMU V3 changes the layout of some of the bits
+ *     to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ *  -Switched from 8:11:13 split for page table lookup to 11:8:13
+ *  -this speeds up page table allocation itself as we now have to memset 1K
+ *    instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and the rest 7K is unused
+ *    need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different value in TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined form a translation entry)
+ *      while from the PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ *      (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
+#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
+#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<7)
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
+
+#else	/* MMU v3 onwards */
+
+#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
+#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
+#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<6)
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
+#endif
+
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
+#endif
+
+#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
+					   usable for shared TLB entries (H) */
+
+#define _PAGE_UNUSED_BIT    (1<<12)
+#endif
+
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+			_PAGE_GLOBAL | _PAGE_PRESENT)
+
+#ifndef CONFIG_ARC_CACHE_PAGES
+#undef _PAGE_CACHEABLE
+#define _PAGE_CACHEABLE 0
+#endif
+
+#ifndef _PAGE_HW_SZ
+#define _PAGE_HW_SZ	0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE     __pgprot(___DEF)
+#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+						       _PAGE_EXECUTE)
+
+#define PAGE_SHARED	PAGE_U_W_R
+
+/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#else
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
+#endif
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ *       which directly corresponds to  PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ *  1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *     can be tracked independent of X/W unlike some other CPUs), still to
+ *     keep things consistent with other archs:
+ *      -Write implies Read:   W => R
+ *      -Execute implies Read: X => R
+ *
+ *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ *     This is to enable COW mechanism
+ */
+	/* xwr */
+#define __P000  PAGE_U_NONE
+#define __P001  PAGE_U_R
+#define __P010  PAGE_U_R	/* Pvt-W => !W */
+#define __P011  PAGE_U_R	/* Pvt-W => !W */
+#define __P100  PAGE_U_X_R	/* X => R */
+#define __P101  PAGE_U_X_R
+#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
+#define __P111  PAGE_U_X_R	/* Pvt-W => !W */
+
+#define __S000  PAGE_U_NONE
+#define __S001  PAGE_U_R
+#define __S010  PAGE_U_W_R	/* W => R */
+#define __S011  PAGE_U_W_R
+#define __S100  PAGE_U_X_R	/* X => R */
+#define __S101  PAGE_U_X_R
+#define __S110  PAGE_U_X_W_R	/* X => R */
+#define __S111  PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ *			32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
+ * -------------------------------------------------------
+ *       |                  |                |
+ *       |                  |                --> off in page frame
+ *       |		    |
+ *       |                  ---> index into Page Table
+ *       |
+ *       ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE	PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE	8		/* 11:8:13 */
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE	8		/* 10:8:14 */
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE	9		/* 11:9:12 */
+#endif
+
+#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
+#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
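+
+/*
+ * Worked example: with CONFIG_ARC_PAGE_SIZE_8K, BITS_IN_PAGE = 13 and
+ * BITS_FOR_PTE = 8, so BITS_FOR_PGD = 11 (the 11:8:13 split above).
+ * PGDIR_SHIFT = 21, i.e. each PGD entry spans 2 MB of vaddr space, with
+ * PTRS_PER_PTE = 256 and PTRS_PER_PGD = 2048.
+ */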
+
+/*
+ * Number of entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for the lowest virtual address we permit any
+ * user-space mapping to be placed at.
+ */
+#define FIRST_USER_ADDRESS      0UL
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+	pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x)			(!pte_val(x))
+#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x)			(!pmd_val(x))
+#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x)			(pmd_val(x))
+#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
+				PAGE_SHIFT)))
+
+#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+					 __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
+#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
+#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)
+
+#define PTE_BIT_FUNC(fn, op) \
+	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
+PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
+PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
+
+#define __HAVE_ARCH_PTE_SPECIAL
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to Page Directory of "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
+ * becomes reading a register
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of a NON "current" task
+ * Thus use this macro only when you are certain that the mm is that of
+ * "current", e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr)	\
+({					\
+	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
+	pgd_base + pgd_index(addr);	\
+})
+#else
+#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
+#endif
+
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * PAGE_PRESENT is zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off)	((swp_entry_t) { \
+					((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
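+
+/*
+ * Worked example: __swp_entry(2, 5) yields 0xa002 (type 2 in bits 4:0,
+ * offset 5 starting at bit 13); __swp_type() and __swp_offset() recover
+ * 2 and 5 respectively, and the PRESENT bit stays clear.
+ */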
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+
+#define kern_addr_valid(addr)	(1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
+#include <asm-generic/pgtable.h>
+
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 0000000..1d694c1
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ *  -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+#endif
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+	unsigned long ksp;	/* kernel mode stack pointer */
+	unsigned long callee_reg;	/* pointer to callee regs */
+	unsigned long fault_address;	/* dbls as brkpt holder as well */
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+	struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD  {                          \
+	.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+#define task_pt_regs(p) \
+	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread */
+#define release_thread(thread) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
+ * get optimised away by gcc
+ */
+#define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
+
+#define cpu_relax_lowlatency() cpu_relax()
+
+#define copy_segments(tsk, mm)      do { } while (0)
+#define release_segments(mm)        do { } while (0)
+
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
+
+/*
+ * Whereabouts of task's sp, fp, blink when it was last seen in kernel mode.
+ * Look in process.c for details of kernel stack layout
+ */
+#define TSK_K_ESP(tsk)		(tsk->thread.ksp)
+
+#define TSK_K_REG(tsk, off)	(*((unsigned long *)(TSK_K_ESP(tsk) + \
+					sizeof(struct callee_regs) + off)))
+
+#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
+
+extern void start_thread(struct pt_regs * regs, unsigned long pc,
+			 unsigned long usp);
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * System Memory Map on ARC
+ *
+ * ---------------------------- (lower 2G, Translated) -------------------------
+ * 0x0000_0000		0x5FFF_FFFF	(user vaddr: TASK_SIZE)
+ * 0x6000_0000		0x6FFF_FFFF	(reserved gutter between U/K)
+ * 0x7000_0000		0x7FFF_FFFF	(kvaddr: vmalloc/modules/pkmap..)
+ *
+ * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) -----------------------
+ * 0x8000_0000		0xBFFF_FFFF	(kernel direct mapped)
+ * 0xC000_0000		0xFFFF_FFFF	(peripheral uncached space)
+ * -----------------------------------------------------------------------------
+ */
+#define VMALLOC_START	0x70000000
+
+/*
+ * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
+ * See asm/highmem.h for details
+ */
+#define VMALLOC_SIZE	(PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
+#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
+
+#define USER_KERNEL_GUTTER    0x10000000
+
+#define TASK_SIZE	(VMALLOC_START - USER_KERNEL_GUTTER)
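+
+/*
+ * Worked example: TASK_SIZE = 0x7000_0000 - 0x1000_0000 = 0x6000_0000,
+ * matching the user span 0x0000_0000..0x5FFF_FFFF in the map above; with
+ * 8K pages (2 MB PGDIR_SIZE), VMALLOC_SIZE = 0x1000_0000 - 4 * 2 MB.
+ */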
+
+#define STACK_TOP       TASK_SIZE
+#define STACK_TOP_MAX   STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 0000000..47111d5
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+#ifdef CONFIG_ISA_ARCOMPACT
+struct pt_regs {
+
+	/* Real registers */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
+
+	unsigned long lp_start, lp_end, lp_count;
+
+	unsigned long status32;	/* status32_l1, status32_l2, erstatus */
+	unsigned long ret;	/* ilink1, ilink2 or eret */
+	unsigned long blink;
+	unsigned long fp;
+	unsigned long r26;	/* gp */
+
+	unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
+	unsigned long sp;	/* User/Kernel depending on where we came from */
+	unsigned long orig_r0;
+
+	/*
+	 * To distinguish bet excp, syscall, irq
+	 * For traps and exceptions, Exception Cause Register.
+	 * 	ECR: <00> <VV> <CC> <PP>
+	 * 	Last word used by Linux for extra state mgmt (syscall-restart)
+	 * For interrupts, use artificial ECR values to note current prio-level
+	 */
+	union {
+		struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+			unsigned long state:8, ecr_vec:8,
+				      ecr_cause:8, ecr_param:8;
+#else
+			unsigned long ecr_param:8, ecr_cause:8,
+				      ecr_vec:8, state:8;
+#endif
+		};
+		unsigned long event;
+	};
+
+	unsigned long user_r25;
+};
+#else
+
+struct pt_regs {
+
+	unsigned long orig_r0;
+
+	union {
+		struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+			unsigned long state:8, ecr_vec:8,
+				      ecr_cause:8, ecr_param:8;
+#else
+			unsigned long ecr_param:8, ecr_cause:8,
+				      ecr_vec:8, state:8;
+#endif
+		};
+		unsigned long event;
+	};
+
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
+
+	unsigned long user_r25;
+
+	unsigned long r26;	/* gp */
+	unsigned long fp;
+	unsigned long sp;	/* user/kernel sp depending on where we came from  */
+
+	unsigned long r12, r30;
+
+	/*------- Below list auto saved by h/w -----------*/
+	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+
+	unsigned long blink;
+	unsigned long lp_end, lp_start, lp_count;
+
+	unsigned long ei, ldi, jli;
+
+	unsigned long ret;
+	unsigned long status32;
+};
+
+#endif
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+	unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+};
+
+#define instruction_pointer(regs)	((regs)->ret)
+#define profile_pc(regs)		instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({  unsigned int sp;		\
+	if (user_mode(regs))	\
+		sp = (regs)->sp;\
+	else			\
+		sp = -1;	\
+	sp;			\
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs)    ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
+#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
+
+#define STATE_SCALL_RESTARTED	0x01
+
+#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(reg) !(reg->state &  STATE_SCALL_RESTARTED)
+
+#define current_pt_regs()					\
+({								\
+	/* open-coded current_thread_info() */			\
+	register unsigned long sp asm ("sp");			\
+	unsigned long pg_start = (sp & ~(THREAD_SIZE - 1));	\
+	(struct pt_regs *)(pg_start + THREAD_SIZE) - 1;	\
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	return (long)regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 0000000..09db952
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __arc_dccm_base[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 0000000..da2c459
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS		MAKE_MM_SEG(0)
+#define USER_DS			MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b)	((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 0000000..744a6ae
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header.
+ * However to still determine it dynamically (for multi-platform images)
+ * we do this in a helper by parsing the FDT early
+ */
+
+extern unsigned int __init arc_early_base_baud(void);
+
+#define BASE_BAUD	arc_early_base_baud()
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 0000000..3078466
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+	int id;
+	const char *str;
+};
+
+struct cpuinfo_data {
+	struct id_to_str info;
+	int up_range;
+};
+
+extern int root_mountflags, end_mem;
+
+void setup_processor(void);
+void __init setup_arch_memory(void);
+
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s)		((v) ? s : "")
+#define IS_DISABLED_RUN(v)	((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
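+
+/*
+ * Note that IS_AVAIL2() expands to *two* comma separated strings, so it feeds
+ * a pair of "%s%s" conversions: the first reports hardware presence, the
+ * second whether the kernel config actually uses the feature. A rough usage
+ * sketch (the cpuinfo field name is purely illustrative):
+ *
+ *	n += scnprintf(buf + n, len - n, "%s%s",
+ *		       IS_AVAIL2(cpu->extn_llsc, "llock/scond ",
+ *				 CONFIG_ARC_HAS_LLSC));
+ */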
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 0000000..fffeecc
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define	SHMLBA	(2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 0000000..9913804
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps, hence this forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP
+ *
+ * @info:		SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp:	An SMP specific h/w block can init itself
+ * 			Could be common across platforms so not covered by
+ * 			mach_desc->init_early()
+ * @init_per_cpu:	Called for each core so SMP h/w block driver can do
+ * 			any needed setup per cpu (e.g. IPI request)
+ * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send:		To send IPI to a @cpu
+ * @ipi_clear:		To clear the IPI received at @irq
+ */
+struct plat_smp_ops {
+	const char 	*info;
+	void		(*init_early_smp)(void);
+	void		(*init_per_cpu)(int cpu);
+	void		(*cpu_kick)(int cpu, unsigned long pc);
+	void		(*ipi_send)(int cpu);
+	void		(*ipi_clear)(int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops  plat_smp_ops;
+
+#else /* CONFIG_SMP */
+
+static inline void smp_init_cpus(void) {}
+static inline const char *arc_platform_smp_cpuinfo(void)
+{
+	return "";
+}
+
+#endif  /* !CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However, despite these, we still provide the IRQ disabling variants because:
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ *	needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *	guaranteed by the platform (not something which the core handles).
+ *	Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *	disabling for atomicity.
+ *
+ *	However the exported spinlock API is not usable due to cyclic header
+ *	deps (even after the system.h disintegration upstream):
+ *	asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ *		-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ *	So the workaround is to use the lowest level arch spinlock API.
+ *	The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP,
+ *	but the same is not true for the arch backend, hence the need for the
+ *	two variants below.
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_atomic_ops_lock);	\
+} while (0)
+
+#define atomic_ops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_atomic_ops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#define bitops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_bitops_lock);	\
+} while (0)
+
+#define bitops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_bitops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags)		local_irq_save(flags)
+#define atomic_ops_unlock(flags)	local_irq_restore(flags)
+
+#define bitops_lock(flags)		local_irq_save(flags)
+#define bitops_unlock(flags)		local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
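+
+/*
+ * Either way the usage pattern is the same: bracket the non-atomic
+ * read-modify-write with the lock/unlock pair. A sketch only, mirroring what
+ * the !LLSC atomic/bitops implementations are expected to do:
+ *
+ *	unsigned long flags;
+ *
+ *	atomic_ops_lock(flags);
+ *	v->counter += i;
+ *	atomic_ops_unlock(flags);
+ */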
+
+#endif	/* !CONFIG_ARC_HAS_LLSC */
+
+#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 0000000..db8c59d
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"	mov	%[got_it], 1		\n"
+	"4:					\n"
+	"					\n"
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 * 	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	smp_mb();
+
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+#else	/* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contending cores, leading to a never ending cycle. So break the cycle
+ * by deferring the retry of a failed exclusive access (SCOND). The actual
+ * delay needed is a function of the number of contending cores as well as the
+ * unrelated coherency traffic from other cores. To keep the code simple, start
+ * off with a small delay of 1, which suffices in most cases, and double the
+ * delay on contention. Eventually the delay is sufficient such that the
+ * coherency pipeline is drained, thus a subsequent exclusive access would
+ * succeed.
+ */
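+/*
+ * Concretely: %[delay] starts at 1 and is doubled (via "rol") on every failed
+ * SCOND, so the busy-wait between retries grows as 1, 2, 4, 8, ... iterations
+ * of the inner decrement loop until the exclusive access finally sticks.
+ */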
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"					\n"				\
+	"4: ; --- done ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 * 	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	scond	%[UNLOCKED], [%[rwlock]]\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif	/* CONFIG_ARC_STAR_9000923308 */
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+	/*
+	 * This smp_mb() is technically superfluous, we only need the one
+	 * after the lock for providing the ACQUIRE semantics.
+	 * However doing the "right" thing was regressing hackbench
+	 * so keeping this, pending further investigation
+	 */
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	ex  %0, [%1]		\n"
+	"	breq  %0, %2, 1b	\n"
+	: "+&r" (val)
+	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory");
+
+	/*
+	 * ACQUIRE barrier to ensure load/store after taking the lock
+	 * don't "bleed-up" out of the critical section (leak-in is allowed)
+	 * http://www.spinics.net/lists/kernel/msg2010409.html
+	 *
+	 * ARCv2 only has load-load, store-store and all-all barrier
+	 * thus need the full all-all barrier
+	 */
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	ex  %0, [%1]		\n"
+	: "+r" (val)
+	: "r"(&(lock->slock))
+	: "memory");
+
+	smp_mb();
+
+	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	/*
+	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
+	 * is the only option
+	 */
+	smp_mb();
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]		\n"
+	: "+r" (val)
+	: "r"(&(lock->slock))
+	: "memory");
+
+	/*
+	 * superfluous, but keeping for now - see pairing version in
+	 * arch_spin_lock above
+	 */
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ */
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	int ret = 0;
+
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
+	}
+
+	arch_spin_unlock(&(rw->lock_mutex));
+
+	smp_mb();
+	return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	int ret = 0;
+
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+	arch_spin_unlock(&(rw->lock_mutex));
+
+	return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
+		cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (!arch_write_trylock(rw))
+		cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#endif	/* CONFIG_ARC_HAS_LLSC */
+
+#define arch_read_can_lock(x)	((x)->counter > 0)
+#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..4e1ef5f
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+	volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__	0
+#define __ARCH_SPIN_LOCK_LOCKED__	1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED		{ __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01  (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
+typedef struct {
+	volatile unsigned int	counter;
+#ifndef CONFIG_ARC_HAS_LLSC
+	arch_spinlock_t		lock_mutex;
+#endif
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED		{ .counter = __ARCH_RW_LOCK_UNLOCKED__ }
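+
+/*
+ * So, for example, three concurrent readers leave the counter at 0x00FF_FFFD;
+ * a writer only succeeds when it observes the full 0x0100_0000 and then
+ * overwrites it with 0, which in turn locks out further readers.
+ */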
+
+#endif
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 0000000..b29b606
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk:		NULL for current task, specific task otherwise
+ * @regs:		pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * 			If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * 			use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn:	Callback invoked for each frame unwound
+ * 			Returns 0 to continue unwinding, -1 to stop
+ * @arg:		Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk  NULL, @regs  NULL
+ *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs  NULL
+ *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+	struct task_struct *tsk, struct pt_regs *regs,
+	int (*consumer_fn) (unsigned int, void *),
+	void *arg);
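+
+/*
+ * A minimal consumer sketch (callback name is illustrative): print every
+ * frame and return 0 so the unwinder keeps going:
+ *
+ *	static int dump_frame(unsigned int addr, void *arg)
+ *	{
+ *		pr_info("  %pS\n", (void *)addr);
+ *		return 0;
+ *	}
+ *
+ *	arc_unwind_core(NULL, NULL, dump_frame, NULL);
+ */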
+
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 0000000..95822b5
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -We had half-optimised memset/memcpy, got better versions of those
+ *  -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 0000000..1b171ab
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n)	fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last)	\
+do {					\
+	ARC_FPU_PREV(prev, next);	\
+	last = __switch_to(prev, next);\
+	ARC_FPU_NEXT(next);		\
+	mb();				\
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 0000000..29de098
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H  1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>		/* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	if (user_mode(regs) && in_syscall(regs))
+		return regs->r8;
+	else
+		return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+	regs->r0 = regs->orig_r0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+	/* 0 if syscall succeeded, otherwise -Errorcode */
+	return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			 int error, long val)
+{
+	regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i:      argument index [0,5]
+ * @n:      number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, unsigned long *args)
+{
+	unsigned long *inside_ptregs = &(regs->r0);
+	inside_ptregs -= i;
+
+	BUG_ON((i + n) > 6);
+
+	while (n--) {
+		args[i++] = (*inside_ptregs);
+		inside_ptregs--;
+	}
+}
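+
+/*
+ * Note: the decrementing walk above relies on the pt_regs layout placing
+ * r1..r5 at successively lower addresses than r0, so syscall arg k lives at
+ * (&regs->r0 - k).
+ */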
+
+#endif
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 0000000..e56f9fc
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H  1
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 0000000..3af6745
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ *  No need for ARC specific thread_info allocator (kmalloc/free). This is
+ *  anyway a one page allocation, thus slab alloc can be short-circuited and
+ *  the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE     (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT	(PAGE_SHIFT << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+struct thread_info {
+	unsigned long flags;		/* low level flags */
+	int preempt_count;		/* 0 => preemptable, <0 => BUG */
+	struct task_struct *task;	/* main task structure */
+	mm_segment_t addr_limit;	/* thread address space */
+	__u32 cpu;			/* current CPU */
+	unsigned long thr_ptr;		/* TLS ptr */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task       = &tsk,			\
+	.flags      = 0,			\
+	.cpu        = 0,			\
+	.preempt_count  = INIT_PREEMPT_COUNT,	\
+	.addr_limit = KERNEL_DS,		\
+}
+
+#define init_thread_info    (init_thread_union.thread_info)
+#define init_stack          (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+	register unsigned long sp asm("sp");
+	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK	0	/* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
+#define TIF_SYSCALL_TRACE	15	/* syscall trace active */
+
+/* task is terminating due to the OOM killer */
+#define TIF_MEMDIE		16
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE		(1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+				 _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 0000000..0a82960
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE	80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 0000000..8a1ec96
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#include <asm/mmu.h>
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+;   and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+;    didn't want to change setup.c
+;    hence extra instruction to clean
+;
+; -- should be in cache since in same line
+;    as r0/r1 saves above
+;
+ld  r0,[jh_ex_way_sel]  ; victim pointer
+and r0,r0,1         ; clean
+xor.f   r0,r0,1         ; flip
+st  r0,[jh_ex_way_sel]  ; store back
+asr r0,r1,12        ; get set # <<1, note bit 12=R=0
+or.nz   r0,r0,1         ; set way bit
+and r0,r0,0xff      ; clean
+sr  r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+;  Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+;  Slower in thrash case (where it matters) because more code is executed
+;  Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr      r0,[eret]               /* instruction address */
+xor     r0,r0,r1                /* compare set #       */
+and.f   r0,r0,0x000fe000        /* 2-way MMU mask      */
+bne     88f                     /* not in same set - no need to probe */
+
+lr      r0,[eret]               /* instruction address */
+and     r0,r0,PAGE_MASK         /* VPN of instruction address */
+; lr  r1,[ARC_REG_TLBPD0]     /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and     r1,r1,0xff              /* Data ASID */
+or      r0,r0,r1                /* Instruction address + Data ASID */
+
+lr      r1,[ARC_REG_TLBPD0]     /* save TLBPD0 containing data TLB*/
+sr      r0,[ARC_REG_TLBPD0]     /* write instruction address to TLBPD0 */
+sr      TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr      r0,[ARC_REG_TLBINDEX]   /* r0 = index where instruction is, if at all */
+sr      r1,[ARC_REG_TLBPD0]     /* restore TLBPD0 */
+
+xor     r0,r0,1                 /* flip bottom bit of data index */
+b.d     89f
+sr      r0,[ARC_REG_TLBINDEX]   /* and put it back */
+88:
+sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s   r3, r1                  ; save PD0 prepared by TLB_RELOAD in r3
+lr      r0,[eret]               /* instruction address */
+and     r0,r0,PAGE_MASK         /* VPN of instruction address */
+bmsk    r1,r3,7                 /* Data ASID, bits 7-0 */
+or_s    r0,r0,r1                /* Instruction address + Data ASID */
+
+sr      r0,[ARC_REG_TLBPD0]     /* write instruction address to TLBPD0 */
+sr      TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr      r0,[ARC_REG_TLBINDEX]   /* r0 = index where instruction is, if at all */
+sr      r3,[ARC_REG_TLBPD0]     /* restore TLBPD0 */
+
+sr      TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr      r1,[ARC_REG_TLBINDEX]   /* r1 = index where MMU wants to put data */
+cmp     r0,r1                   /* if no match on indices, go around */
+xor.eq  r1,r1,1                 /* flip bottom bit of data index */
+sr      r1,[ARC_REG_TLBINDEX]   /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 0000000..a9db5f6
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#define tlb_flush(tlb)				\
+do {						\
+	if (tlb->fullmm)			\
+		flush_tlb_mm((tlb)->mm);	\
+} while (0)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part - implemented via tlb_start_vma() for VIPT aliasing D$
+ * 2) tlb-flush part - implemented via tlb_end_vma() which flushes the TLB range
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+#define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
+} while(0)
+#endif
+
+#define tlb_end_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end);	\
+} while (0)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 0000000..1fe9c8c
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end);
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			       unsigned long end);
+
+#ifndef CONFIG_SMP
+#define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+							 unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
+#endif /* CONFIG_SMP */
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 0000000..d4d8df7
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,757 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ *    -__clear_user( ) called multiple times during elf load was byte loop
+ *    converted to do as much word clear as possible.
+ *
+ * vineetg: Dec 2009
+ *    -Hand crafted constant propagation for "constant" copy sizes
+ *    -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ *    -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h>	/* for generic string functions */
+
+
+#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ * 	(start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * because it essentially checks if the buffer end is within the limit and
+ * @len is non-negative, which implies that the buffer start will be within
+ * the limit too.
+ *
+ * The reason for rewriting is that, for the majority of cases, @len is a
+ * compile time constant, causing the first sub-expression to be subsumed at
+ * compile time.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
+				 ((addr) <= (get_fs() - (sz))))
+#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
+				 likely(__user_ok((addr), (sz))))
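+
+/*
+ * For instance, with TASK_SIZE = 0x6000_0000 and a constant sz of 0x10, the
+ * first clause folds away at compile time and the generated check reduces to
+ * a single "addr <= get_fs() - 0x10" comparison.
+ */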
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k)					\
+({								\
+	long __ret = 0;	/* success by default */	\
+	switch (sz) {						\
+	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
+	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
+	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
+	case 8: __arc_get_user_one_64(*(k), u, __ret);     break;	\
+	}							\
+	__ret;							\
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret)	\
+	__asm__ __volatile__(                   \
+	"1:	"op"    %1,[%2]\n"		\
+	"2:	;nop\n"				\
+	"	.section .fixup, \"ax\"\n"	\
+	"	.align 4\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1,  0\n"			\
+	"	j   2b\n"			\
+	"	.previous\n"			\
+	"	.section __ex_table, \"a\"\n"	\
+	"	.align 4\n"			\
+	"	.word 1b,3b\n"			\
+	"	.previous\n"			\
+						\
+	: "+r" (ret), "=r" (dst)		\
+	: "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret)	\
+	__asm__ __volatile__(                   \
+	"1:	ld   %1,[%2]\n"			\
+	"4:	ld  %R1,[%2, 4]\n"		\
+	"2:	;nop\n"				\
+	"	.section .fixup, \"ax\"\n"	\
+	"	.align 4\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1,  0\n"			\
+	"	mov %R1, 0\n"			\
+	"	j   2b\n"			\
+	"	.previous\n"			\
+	"	.section __ex_table, \"a\"\n"	\
+	"	.align 4\n"			\
+	"	.word 1b,3b\n"			\
+	"	.word 4b,3b\n"			\
+	"	.previous\n"			\
+						\
+	: "+r" (ret), "=r" (dst)		\
+	: "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k)					\
+({								\
+	long __ret = 0;	/* success by default */	\
+	switch (sz) {						\
+	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
+	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
+	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
+	case 8: __arc_put_user_one_64(*(k), u, __ret);     break;	\
+	}							\
+	__ret;							\
+})
+
+#define __arc_put_user_one(src, dst, op, ret)	\
+	__asm__ __volatile__(                   \
+	"1:	"op"    %1,[%2]\n"		\
+	"2:	;nop\n"				\
+	"	.section .fixup, \"ax\"\n"	\
+	"	.align 4\n"			\
+	"3:	mov %0, %3\n"			\
+	"	j   2b\n"			\
+	"	.previous\n"			\
+	"	.section __ex_table, \"a\"\n"	\
+	"	.align 4\n"			\
+	"	.word 1b,3b\n"			\
+	"	.previous\n"			\
+						\
+	: "+r" (ret)				\
+	: "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret)	\
+	__asm__ __volatile__(                   \
+	"1:	st   %1,[%2]\n"			\
+	"4:	st  %R1,[%2, 4]\n"		\
+	"2:	;nop\n"				\
+	"	.section .fixup, \"ax\"\n"	\
+	"	.align 4\n"			\
+	"3:	mov %0, %3\n"			\
+	"	j   2b\n"			\
+	"	.previous\n"			\
+	"	.section __ex_table, \"a\"\n"	\
+	"	.align 4\n"			\
+	"	.word 1b,3b\n"			\
+	"	.word 4b,3b\n"			\
+	"	.previous\n"			\
+						\
+	: "+r" (ret)				\
+	: "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	long res = 0;
+	char val;
+	unsigned long tmp1, tmp2, tmp3, tmp4;
+	unsigned long orig_n = n;
+
+	if (n == 0)
+		return 0;
+
+	/* unaligned */
+	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+		unsigned char tmp;
+
+		__asm__ __volatile__ (
+		"	mov.f   lp_count, %0		\n"
+		"	lpnz 2f				\n"
+		"1:	ldb.ab  %1, [%3, 1]		\n"
+		"	stb.ab  %1, [%2, 1]		\n"
+		"	sub     %0,%0,1			\n"
+		"2:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"3:	j   2b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 3b			\n"
+		"	.previous			\n"
+
+		: "+r" (n),
+		/*
+		 * Note as an '&' earlyclobber operand to make sure the
+		 * temporary register inside the loop is not the same as
+		 *  FROM or TO.
+		*/
+		  "=&r" (tmp), "+r" (to), "+r" (from)
+		:
+		: "lp_count", "lp_start", "lp_end", "memory");
+
+		return n;
+	}
+
+	/*
+	 * Hand-crafted constant propagation to reduce code sz of the
+	 * laddered copy 16x,8,4,2,1
+	 */
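+	/*
+	 * e.g. a constant n of 22 ends up as one 16-byte block, one 4-byte
+	 * and one 2-byte copy below, with no residual loop at run time.
+	 */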
+	if (__builtin_constant_p(orig_n)) {
+		res = orig_n;
+
+		if (orig_n / 16) {
+			orig_n = orig_n % 16;
+
+			__asm__ __volatile__(
+			"	lsr   lp_count, %7,4		\n"
+			"	lp    3f			\n"
+			"1:	ld.ab   %3, [%2, 4]		\n"
+			"11:	ld.ab   %4, [%2, 4]		\n"
+			"12:	ld.ab   %5, [%2, 4]		\n"
+			"13:	ld.ab   %6, [%2, 4]		\n"
+			"	st.ab   %3, [%1, 4]		\n"
+			"	st.ab   %4, [%1, 4]		\n"
+			"	st.ab   %5, [%1, 4]		\n"
+			"	st.ab   %6, [%1, 4]		\n"
+			"	sub     %0,%0,16		\n"
+			"3:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   3b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   1b, 4b			\n"
+			"	.word   11b,4b			\n"
+			"	.word   12b,4b			\n"
+			"	.word   13b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+			: "ir"(n)
+			: "lp_count", "memory");
+		}
+		if (orig_n / 8) {
+			orig_n = orig_n % 8;
+
+			__asm__ __volatile__(
+			"14:	ld.ab   %3, [%2,4]		\n"
+			"15:	ld.ab   %4, [%2,4]		\n"
+			"	st.ab   %3, [%1,4]		\n"
+			"	st.ab   %4, [%1,4]		\n"
+			"	sub     %0,%0,8			\n"
+			"31:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   31b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   14b,4b			\n"
+			"	.word   15b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2)
+			:
+			: "memory");
+		}
+		if (orig_n / 4) {
+			orig_n = orig_n % 4;
+
+			__asm__ __volatile__(
+			"16:	ld.ab   %3, [%2,4]		\n"
+			"	st.ab   %3, [%1,4]		\n"
+			"	sub     %0,%0,4			\n"
+			"32:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   32b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   16b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n / 2) {
+			orig_n = orig_n % 2;
+
+			__asm__ __volatile__(
+			"17:	ldw.ab   %3, [%2,2]		\n"
+			"	stw.ab   %3, [%1,2]		\n"
+			"	sub      %0,%0,2		\n"
+			"33:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   33b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   17b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n & 1) {
+			__asm__ __volatile__(
+			"18:	ldb.ab   %3, [%2,2]		\n"
+			"	stb.ab   %3, [%1,2]		\n"
+			"	sub      %0,%0,1		\n"
+			"34:	; nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   34b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   18b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+		__asm__ __volatile__(
+		"	mov %0,%3			\n"
+		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
+		"	lpnz    3f			\n"
+		"1:	ld.ab   %5, [%2, 4]		\n"
+		"11:	ld.ab   %6, [%2, 4]		\n"
+		"12:	ld.ab   %7, [%2, 4]		\n"
+		"13:	ld.ab   %8, [%2, 4]		\n"
+		"	st.ab   %5, [%1, 4]		\n"
+		"	st.ab   %6, [%1, 4]		\n"
+		"	st.ab   %7, [%1, 4]		\n"
+		"	st.ab   %8, [%1, 4]		\n"
+		"	sub     %0,%0,16		\n"
+		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
+		"	bz      34f			\n"
+		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
+		"14:	ld.ab   %5, [%2,4]		\n"
+		"15:	ld.ab   %6, [%2,4]		\n"
+		"	st.ab   %5, [%1,4]		\n"
+		"	st.ab   %6, [%1,4]		\n"
+		"	sub.f   %0,%0,8			\n"
+		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
+		"16:	ld.ab   %5, [%2,4]		\n"
+		"	st.ab   %5, [%1,4]		\n"
+		"	sub.f   %0,%0,4			\n"
+		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
+		"17:	ldw.ab  %5, [%2,2]		\n"
+		"	stw.ab  %5, [%1,2]		\n"
+		"	sub.f   %0,%0,2			\n"
+		"33:	bbit0   %3,0,34f		\n"
+		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
+		"	stb.ab  %5, [%1,1]		\n"
+		"	sub.f   %0,%0,1			\n"
+		"34:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   34b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.word   11b,4b			\n"
+		"	.word   12b,4b			\n"
+		"	.word   13b,4b			\n"
+		"	.word   14b,4b			\n"
+		"	.word   15b,4b			\n"
+		"	.word   16b,4b			\n"
+		"	.word   17b,4b			\n"
+		"	.word   18b,4b			\n"
+		"	.previous			\n"
+		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+		:
+		: "lp_count", "memory");
+	}
+
+	return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+					   unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	long res = 0;
+	char val;
+	unsigned long tmp1, tmp2, tmp3, tmp4;
+	unsigned long orig_n = n;
+
+	if (n == 0)
+		return 0;
+
+	/* unaligned */
+	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+		unsigned char tmp;
+
+		__asm__ __volatile__(
+		"	mov.f   lp_count, %0		\n"
+		"	lpnz 3f				\n"
+		"	ldb.ab  %1, [%3, 1]		\n"
+		"1:	stb.ab  %1, [%2, 1]		\n"
+		"	sub     %0, %0, 1		\n"
+		"3:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   3b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.previous			\n"
+
+		: "+r" (n),
+		/* Note as an '&' earlyclobber operand to make sure the
+		 * temporary register inside the loop is not the same as
+		 * FROM or TO.
+		 */
+		  "=&r" (tmp), "+r" (to), "+r" (from)
+		:
+		: "lp_count", "lp_start", "lp_end", "memory");
+
+		return n;
+	}
+
+	if (__builtin_constant_p(orig_n)) {
+		res = orig_n;
+
+		if (orig_n / 16) {
+			orig_n = orig_n % 16;
+
+			__asm__ __volatile__(
+			"	lsr lp_count, %7,4		\n"
+			"	lp  3f				\n"
+			"	ld.ab %3, [%2, 4]		\n"
+			"	ld.ab %4, [%2, 4]		\n"
+			"	ld.ab %5, [%2, 4]		\n"
+			"	ld.ab %6, [%2, 4]		\n"
+			"1:	st.ab %3, [%1, 4]		\n"
+			"11:	st.ab %4, [%1, 4]		\n"
+			"12:	st.ab %5, [%1, 4]		\n"
+			"13:	st.ab %6, [%1, 4]		\n"
+			"	sub   %0, %0, 16		\n"
+			"3:;nop					\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   3b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   1b, 4b			\n"
+			"	.word   11b,4b			\n"
+			"	.word   12b,4b			\n"
+			"	.word   13b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+			: "ir"(n)
+			: "lp_count", "memory");
+		}
+		if (orig_n / 8) {
+			orig_n = orig_n % 8;
+
+			__asm__ __volatile__(
+			"	ld.ab   %3, [%2,4]		\n"
+			"	ld.ab   %4, [%2,4]		\n"
+			"14:	st.ab   %3, [%1,4]		\n"
+			"15:	st.ab   %4, [%1,4]		\n"
+			"	sub     %0, %0, 8		\n"
+			"31:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   31b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   14b,4b			\n"
+			"	.word   15b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2)
+			:
+			: "memory");
+		}
+		if (orig_n / 4) {
+			orig_n = orig_n % 4;
+
+			__asm__ __volatile__(
+			"	ld.ab   %3, [%2,4]		\n"
+			"16:	st.ab   %3, [%1,4]		\n"
+			"	sub     %0, %0, 4		\n"
+			"32:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   32b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   16b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n / 2) {
+			orig_n = orig_n % 2;
+
+			__asm__ __volatile__(
+			"	ldw.ab    %3, [%2,2]		\n"
+			"17:	stw.ab    %3, [%1,2]		\n"
+			"	sub       %0, %0, 2		\n"
+			"33:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   33b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   17b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n & 1) {
+			__asm__ __volatile__(
+			"	ldb.ab  %3, [%2,1]		\n"
+			"18:	stb.ab  %3, [%1,1]		\n"
+			"	sub     %0, %0, 1		\n"
+			"34:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   34b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   18b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+		__asm__ __volatile__(
+		"	mov   %0,%3			\n"
+		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
+		"	lpnz  3f			\n"
+		"	ld.ab %5, [%2, 4]		\n"
+		"	ld.ab %6, [%2, 4]		\n"
+		"	ld.ab %7, [%2, 4]		\n"
+		"	ld.ab %8, [%2, 4]		\n"
+		"1:	st.ab %5, [%1, 4]		\n"
+		"11:	st.ab %6, [%1, 4]		\n"
+		"12:	st.ab %7, [%1, 4]		\n"
+		"13:	st.ab %8, [%1, 4]		\n"
+		"	sub   %0, %0, 16		\n"
+		"3:	and.f %3,%3,0xf			\n" /* stragglers */
+		"	bz 34f				\n"
+		"	bbit0   %3,3,31f		\n" /* 8 bytes left */
+		"	ld.ab   %5, [%2,4]		\n"
+		"	ld.ab   %6, [%2,4]		\n"
+		"14:	st.ab   %5, [%1,4]		\n"
+		"15:	st.ab   %6, [%1,4]		\n"
+		"	sub.f   %0, %0, 8		\n"
+		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
+		"	ld.ab   %5, [%2,4]		\n"
+		"16:	st.ab   %5, [%1,4]		\n"
+		"	sub.f   %0, %0, 4		\n"
+		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
+		"	ldw.ab    %5, [%2,2]		\n"
+		"17:	stw.ab    %5, [%1,2]		\n"
+		"	sub.f %0, %0, 2			\n"
+		"33:	bbit0 %3,0,34f			\n"
+		"	ldb.ab    %5, [%2,1]		\n"  /* 1 byte left */
+		"18:	stb.ab  %5, [%1,1]		\n"
+		"	sub.f %0, %0, 1			\n"
+		"34:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   34b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.word   11b,4b			\n"
+		"	.word   12b,4b			\n"
+		"	.word   13b,4b			\n"
+		"	.word   14b,4b			\n"
+		"	.word   15b,4b			\n"
+		"	.word   16b,4b			\n"
+		"	.word   17b,4b			\n"
+		"	.word   18b,4b			\n"
+		"	.previous			\n"
+		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+		:
+		: "lp_count", "memory");
+	}
+
+	return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+	long res = n;
+	unsigned char *d_char = to;
+
+	__asm__ __volatile__(
+	"	bbit0   %0, 0, 1f		\n"
+	"75:	stb.ab  %2, [%0,1]		\n"
+	"	sub %1, %1, 1			\n"
+	"1:	bbit0   %0, 1, 2f		\n"
+	"76:	stw.ab  %2, [%0,2]		\n"
+	"	sub %1, %1, 2			\n"
+	"2:	asr.f   lp_count, %1, 2		\n"
+	"	lpnz    3f			\n"
+	"77:	st.ab   %2, [%0,4]		\n"
+	"	sub %1, %1, 4			\n"
+	"3:	bbit0   %1, 1, 4f		\n"
+	"78:	stw.ab  %2, [%0,2]		\n"
+	"	sub %1, %1, 2			\n"
+	"4:	bbit0   %1, 0, 5f		\n"
+	"79:	stb.ab  %2, [%0,1]		\n"
+	"	sub %1, %1, 1			\n"
+	"5:					\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"3:	j   5b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word   75b, 3b			\n"
+	"	.word   76b, 3b			\n"
+	"	.word   77b, 3b			\n"
+	"	.word   78b, 3b			\n"
+	"	.word   79b, 3b			\n"
+	"	.previous			\n"
+	: "+r"(d_char), "+r"(res)
+	: "i"(0)
+	: "lp_count", "lp_start", "lp_end", "memory");
+
+	return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	long res = 0;
+	char val;
+
+	if (count == 0)
+		return 0;
+
+	__asm__ __volatile__(
+	"	lp	3f			\n"
+	"1:	ldb.ab  %3, [%2, 1]		\n"
+	"	breq.d	%3, 0, 3f               \n"
+	"	stb.ab  %3, [%1, 1]		\n"
+	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
+	"3:								\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
+	"	j   3b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word   1b, 4b			\n"
+	"	.previous			\n"
+	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
+	: "g"(-EFAULT), "l"(count)
+	: "memory");
+
+	return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+	long res, tmp1, cnt;
+	char val;
+
+	__asm__ __volatile__(
+	"	mov %2, %1			\n"
+	"1:	ldb.ab  %3, [%0, 1]		\n"
+	"	breq.d  %3, 0, 2f		\n"
+	"	sub.f   %2, %2, 1		\n"
+	"	bnz 1b				\n"
+	"	sub %2, %2, 1			\n"
+	"2:	sub %0, %1, %2			\n"
+	"3:	;nop				\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"4:	mov %0, 0			\n"
+	"	j   3b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word 1b, 4b			\n"
+	"	.previous			\n"
+	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+	: "0"(s), "1"(n)
+	: "memory");
+
+	return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
+#define __clear_user(d, n)		__arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+		unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+		unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+		unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+		long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
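For reference, the non-constant-length path in the copy routines above amounts
to the plain-C ladder below: a 16-byte main loop followed by 8/4/2/1-byte
straggler steps. This is an illustrative sketch only (ladder_copy() is a
hypothetical name, not part of the patch); the real asm additionally relies on
the __ex_table entries so that a fault makes the routine return the number of
bytes left uncopied.

#include <string.h>

/* Illustrative sketch only; ladder_copy() is a hypothetical name. */
static unsigned long ladder_copy(void *dst, const void *src, unsigned long n)
{
	char *to = dst;
	const char *from = src;

	/* 16-byte main loop (the asm drives this with lp_count/lpnz) */
	while (n >= 16) {
		memcpy(to, from, 16);
		to += 16;
		from += 16;
		n -= 16;
	}
	/* stragglers: the asm tests bits 3..0 of the residue with bbit0 */
	if (n & 8) { memcpy(to, from, 8); to += 8; from += 8; }
	if (n & 4) { memcpy(to, from, 4); to += 4; from += 4; }
	if (n & 2) { memcpy(to, from, 2); to += 2; from += 2; }
	if (n & 1) { *to = *from; }

	return 0;	/* the real routine returns bytes NOT copied on a fault */
}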
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 0000000..6da6b4e
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_EMUL_UNALIGNED
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+		     struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+		 struct callee_regs *cregs)
+{
+	/* Not fixed */
+	return 1;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
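The stub above encodes the convention that a non-zero return from
misaligned_fixup() means the access was not fixed up. A hypothetical caller
(illustrative sketch only, assuming this header is included; not taken from
the actual trap handler) would use it like this:

/* Illustrative caller sketch; example_unaligned_trap() is hypothetical. */
static void example_unaligned_trap(unsigned long address, struct pt_regs *regs,
				   struct callee_regs *cregs)
{
	if (misaligned_fixup(address, regs, cregs) != 0) {
		/*
		 * Not fixed: either CONFIG_ARC_EMUL_UNALIGNED is off or
		 * the emulation failed, so a real handler would deliver
		 * a bus-error signal to the offending task here.
		 */
	}
}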
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 0000000..c11a25b
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+	unsigned long r0;
+	unsigned long r1;
+	unsigned long r2;
+	unsigned long r3;
+	unsigned long r4;
+	unsigned long r5;
+	unsigned long r6;
+	unsigned long r7;
+	unsigned long r8;
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long r16;
+	unsigned long r17;
+	unsigned long r18;
+	unsigned long r19;
+	unsigned long r20;
+	unsigned long r21;
+	unsigned long r22;
+	unsigned long r23;
+	unsigned long r24;
+	unsigned long r25;
+	unsigned long r26;
+	unsigned long r27;	/* fp */
+	unsigned long r28;	/* sp */
+	unsigned long r29;
+	unsigned long r30;
+	unsigned long r31;	/* blink */
+	unsigned long r63;	/* pc */
+};
+
+struct unwind_frame_info {
+	struct arc700_regs regs;
+	struct task_struct *task;
+	unsigned call_frame:1;
+};
+
+#define UNW_PC(frame)		((frame)->regs.r63)
+#define UNW_SP(frame)		((frame)->regs.r28)
+#define UNW_BLINK(frame)	((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame)		((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET	4
+#define FRAME_LINK_OFFSET	0
+#define STACK_BOTTOM_UNW(tsk)	STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk)	((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame)		((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr)	(((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+	PTREGS_INFO(r0), \
+	PTREGS_INFO(r1), \
+	PTREGS_INFO(r2), \
+	PTREGS_INFO(r3), \
+	PTREGS_INFO(r4), \
+	PTREGS_INFO(r5), \
+	PTREGS_INFO(r6), \
+	PTREGS_INFO(r7), \
+	PTREGS_INFO(r8), \
+	PTREGS_INFO(r9), \
+	PTREGS_INFO(r10), \
+	PTREGS_INFO(r11), \
+	PTREGS_INFO(r12), \
+	PTREGS_INFO(r13), \
+	PTREGS_INFO(r14), \
+	PTREGS_INFO(r15), \
+	PTREGS_INFO(r16), \
+	PTREGS_INFO(r17), \
+	PTREGS_INFO(r18), \
+	PTREGS_INFO(r19), \
+	PTREGS_INFO(r20), \
+	PTREGS_INFO(r21), \
+	PTREGS_INFO(r22), \
+	PTREGS_INFO(r23), \
+	PTREGS_INFO(r24), \
+	PTREGS_INFO(r25), \
+	PTREGS_INFO(r26), \
+	PTREGS_INFO(r27), \
+	PTREGS_INFO(r28), \
+	PTREGS_INFO(r29), \
+	PTREGS_INFO(r30), \
+	PTREGS_INFO(r31), \
+	PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+	((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+			      unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+			 int (*callback) (struct unwind_frame_info *info,
+					  void *arg),
+			 void *arg)
+{
+	return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+	return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+	return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+					    struct pt_regs *regs)
+{
+	return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
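Assuming CONFIG_ARC_DW2_UNWIND is enabled, the interface above is meant to be
driven by seeding an unwind_frame_info and repeatedly calling arc_unwind()
until it reports that no further frame can be recovered. The walker below is
an illustrative sketch with hypothetical names, not the kernel's own
stack-trace code:

/* Illustrative walker; example_walk_stack() is a hypothetical helper. */
static void example_walk_stack(struct unwind_frame_info *frame,
			       void (*consume_pc)(unsigned long pc))
{
	while (1) {
		unsigned long pc = UNW_PC(frame);

		if (!pc)
			break;

		consume_pc(pc);

		if (arc_unwind(frame))		/* non-zero: no frame info */
			break;

		/* the caller's PC is the just-restored return address */
		frame->regs.r63 = frame->regs.r31;
	}
}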