File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
new file mode 100644
index 0000000..502a91d
--- /dev/null
+++ b/arch/ia64/include/asm/Kbuild
@@ -0,0 +1,11 @@
+
+generic-y += clkdev.h
+generic-y += exec.h
+generic-y += irq_work.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += preempt.h
+generic-y += trace_clock.h
+generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/ia64/include/asm/acenv.h b/arch/ia64/include/asm/acenv.h
new file mode 100644
index 0000000..35ff13a
--- /dev/null
+++ b/arch/ia64/include/asm/acenv.h
@@ -0,0 +1,52 @@
+/*
+ * IA64 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ *   Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_IA64_ACENV_H
+#define _ASM_IA64_ACENV_H
+
+#include <asm/intrinsics.h>
+
+#define COMPILER_DEPENDENT_INT64	long
+#define COMPILER_DEPENDENT_UINT64	unsigned long
+
+/* Asm macros */
+
+static inline int
+ia64_acpi_acquire_global_lock(unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
+
+static inline int
+ia64_acpi_release_global_lock(unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq)				\
+	((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)				\
+	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
+
+#endif /* _ASM_IA64_ACENV_H */
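
The two helpers above implement the ACPI global-lock protocol on the FACS lock
word: bit 0 is the "pending" flag, bit 1 the "owned" flag. Acquire always sets
"owned" and, when the lock is already held, also sets "pending" and reports
failure; release clears both bits and returns the old "pending" bit so the
caller knows whether firmware must be signalled. A hedged userspace sketch of
the same bit algebra follows, with GCC's __sync_val_compare_and_swap standing
in for ia64_cmpxchg4_acq (note the intrinsic takes (ptr, new, old), the
builtin (ptr, old, new)); this is illustrative, not the kernel's code:

    #include <stdio.h>

    /* FACS global-lock word: bit 0 = pending, bit 1 = owned. */
    static int acquire_global_lock(unsigned int *lock)
    {
        unsigned int old, new;

        do {
            old = *lock;
            /* set "owned"; copy the old owned bit into "pending" */
            new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
        } while (__sync_val_compare_and_swap(lock, old, new) != old);
        return (new < 3) ? -1 : 0;      /* -1 (truthy): acquired */
    }

    static int release_global_lock(unsigned int *lock)
    {
        unsigned int old, new;

        do {
            old = *lock;
            new = old & ~0x3;           /* clear owned + pending */
        } while (__sync_val_compare_and_swap(lock, old, new) != old);
        return old & 0x1;               /* 1: a waiter was pending */
    }

    int main(void)
    {
        unsigned int lock = 0;

        printf("first:   %d\n", acquire_global_lock(&lock));        /* -1 */
        printf("second:  %d\n", acquire_global_lock(&lock));        /*  0 */
        printf("pending: %d\n", release_global_lock(&lock));        /*  1 */
        return 0;
    }
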
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
new file mode 100644
index 0000000..7f8362b
--- /dev/null
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -0,0 +1,20 @@
+/*
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vendor specific extensions to ACPI.
+ */
+
+#ifndef _ASM_IA64_ACPI_EXT_H
+#define _ASM_IA64_ACPI_EXT_H
+
+#include <linux/types.h>
+
+extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
+
+#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
new file mode 100644
index 0000000..aa0fdf1
--- /dev/null
+++ b/arch/ia64/include/asm/acpi.h
@@ -0,0 +1,147 @@
+/*
+ *  Copyright (C) 1999 VA Linux Systems
+ *  Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
+ *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <acpi/pdc_intel.h>
+
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <asm/numa.h>
+
+#ifdef	CONFIG_ACPI
+extern int acpi_lapic;
+#define acpi_disabled 0	/* ACPI always enabled on IA64 */
+#define acpi_noirq 0	/* ACPI always enabled on IA64 */
+#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
+#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+	return !!acpi_lapic;
+}
+#endif
+#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
+static inline void disable_acpi(void) { }
+
+#ifdef CONFIG_IA64_GENERIC
+const char *acpi_get_sysname (void);
+#else
+static inline const char *acpi_get_sysname (void)
+{
+# if defined (CONFIG_IA64_HP_SIM)
+	return "hpsim";
+# elif defined (CONFIG_IA64_HP_ZX1)
+	return "hpzx1";
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+	return "hpzx1_swiotlb";
+# elif defined (CONFIG_IA64_SGI_SN2)
+	return "sn2";
+# elif defined (CONFIG_IA64_SGI_UV)
+	return "uv";
+# elif defined (CONFIG_IA64_DIG)
+	return "dig";
+# elif defined(CONFIG_IA64_DIG_VTD)
+	return "dig_vtd";
+# else
+#	error Unknown platform.  Fix acpi.c.
+# endif
+}
+#endif
+int acpi_request_vector (u32 int_type);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
+
+extern unsigned long acpi_wakeup_address;
+
+/*
+ * Record the cpei override flag and current logical cpu. This is
+ * useful for CPU removal.
+ */
+extern unsigned int can_cpei_retarget(void);
+extern unsigned int is_cpu_cpei_target(unsigned int cpu);
+extern void set_cpei_target_cpu(unsigned int cpu);
+extern unsigned int get_cpei_target_cpu(void);
+extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256)
+#endif
+extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
+extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
+#endif
+
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
+#define acpi_unlazy_tlb(x)
+
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu)  \
+	for_each_cpu((cpu), &early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+	int low_cpu, high_cpu;
+	int cpu;
+	int next_nid = 0;
+
+	low_cpu = cpumask_weight(&early_cpu_possible_map);
+
+	high_cpu = max(low_cpu, min_cpus);
+	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+		cpumask_set_cpu(cpu, &early_cpu_possible_map);
+		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+			node_cpuid[cpu].nid = next_nid;
+			next_nid++;
+			if (next_nid >= num_online_nodes())
+				next_nid = 0;
+		}
+	}
+}
+#endif /* CONFIG_ACPI_NUMA */
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
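
per_cpu_scan_finalize() above pads the possible-CPU map beyond what ACPI
enumerated: starting from the count of CPUs already found, it extends the map
to at least min_cpus, adds reserve_cpus hot-plug slots (capped at NR_CPUS),
and round-robins the padded entries across the online nodes. A standalone
sketch of that sizing arithmetic (NR_CPUS and the node count are made-up
values here, and the NUMA_NO_NODE check is elided):

    #include <stdio.h>

    #define NR_CPUS 64

    /* Sketch of the possible-map padding in per_cpu_scan_finalize(). */
    int main(void)
    {
        int found = 8;                  /* CPUs enumerated by ACPI */
        int min_cpus = 16, reserve_cpus = 4, online_nodes = 2;
        int high, cpu, next_nid = 0;

        high = (found > min_cpus) ? found : min_cpus;
        if (high + reserve_cpus < NR_CPUS)
            high += reserve_cpus;
        else
            high = NR_CPUS;

        for (cpu = found; cpu < high; cpu++) {
            printf("cpu %2d -> possible, nid %d\n", cpu, next_nid);
            if (++next_nid >= online_nodes)
                next_nid = 0;
        }
        return 0;   /* CPUs 8..19 become possible, on nodes 0,1,0,1,... */
    }
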
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
new file mode 100644
index 0000000..01d09c4
--- /dev/null
+++ b/arch/ia64/include/asm/agp.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
+
+/*
+ * IA-64 specific AGP definitions.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+ * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+ * (unlike x86, where it gets mapped "write-coalescing").
+ */
+#define map_page_into_agp(page)		/* nothing */
+#define unmap_page_from_agp(page)	/* nothing */
+#define flush_agp_cache()		mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)		\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_IA64_AGP_H */
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
new file mode 100644
index 0000000..3ab6d75
--- /dev/null
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -0,0 +1,135 @@
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#define ENTRY(name)				\
+	.align 32;				\
+	.proc name;				\
+name:
+
+#define ENTRY_MIN_ALIGN(name)			\
+	.align 16;				\
+	.proc name;				\
+name:
+
+#define GLOBAL_ENTRY(name)			\
+	.global name;				\
+	ENTRY(name)
+
+#define END(name)				\
+	.endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP			0x8
+#define ASM_UNW_PRLG_PFS		0x4
+#define ASM_UNW_PRLG_PSP		0x2
+#define ASM_UNW_PRLG_PR			0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ *
+ * When adding any new .section/.previous entries here, make sure to
+ * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
+ * unpleasant things will happen.
+ */
+
+	.section "__ex_table", "a"		// declare section & section attributes
+	.previous
+
+# define EX(y,x...)				\
+	.xdata4 "__ex_table", 99f-., y-.;	\
+  [99:]	x
+# define EXCLR(y,x...)				\
+	.xdata4 "__ex_table", 99f-., y-.+4;	\
+  [99:]	x
+
+/*
+ * Tag MCA recoverable instruction ranges.
+ */
+
+	.section "__mca_table", "a"		// declare section & section attributes
+	.previous
+
+# define MCA_RECOVER_RANGE(y)			\
+	.xdata4 "__mca_table", y-., 99f-.;	\
+  [99:]
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address.  We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+	.section ".data..patch.vtop", "a"	// declare section & section attributes
+	.previous
+
+#define	LOAD_PHYSICAL(pr, reg, obj)		\
+[1:](pr)movl reg = obj;				\
+	.xdata4 ".data..patch.vtop", 1b-.
+
+/*
+ * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+	.section ".data..patch.mckinley_e9", "a"
+	.previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN					\
+	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
+1:{ .mib;						\
+	nop.m 0;					\
+	mov r16=ar.pfs;					\
+	br.call.sptk.many b7=2f;;			\
+  };							\
+2:{ .mib;						\
+	nop.m 0;					\
+	mov ar.pfs=r16;					\
+	br.ret.sptk.many b6;;				\
+  }
+#else
+# define FSYS_RETURN	br.ret.sptk.many b6
+#endif
+
+/*
+ * If physical stack register size is different from DEF_NUM_STACK_REG,
+ * dynamically patch the kernel for correct size.
+ */
+	.section ".data..patch.phys_stack_reg", "a"
+	.previous
+#define LOAD_PHYS_STACK_REG_SIZE(reg)			\
+[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
+	.xdata4 ".data..patch.phys_stack_reg", 1b-.
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n)	.align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data		.serialize.data
+# define dv_serialize_instruction	.serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
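
The EX()/EXCLR() and MCA_RECOVER_RANGE() macros above emit pairs of 32-bit
self-relative offsets (the "99f-." and "y-." expressions), so the __ex_table
and __mca_table entries remain valid wherever the image is loaded and need no
load-time relocation. A hedged C sketch of how such an entry is decoded (the
two-field layout here is illustrative, not ia64's exact record format):

    #include <stdint.h>
    #include <stdio.h>

    /* Each field stores "target minus the field's own address", making
     * the table position-independent. */
    struct ex_entry {
        int32_t insn_off;       /* faulting instruction, self-relative */
        int32_t fixup_off;      /* fixup code, self-relative */
    };

    static uintptr_t ex_insn(const struct ex_entry *e)
    {
        return (uintptr_t)&e->insn_off + e->insn_off;
    }

    static uintptr_t ex_fixup(const struct ex_entry *e)
    {
        return (uintptr_t)&e->fixup_off + e->fixup_off;
    }

    int main(void)
    {
        static char code[64];
        static struct ex_entry e;

        /* encode the way "99f-." does: target minus field address */
        e.insn_off  = (int32_t)(&code[16] - (char *)&e.insn_off);
        e.fixup_off = (int32_t)(&code[32] - (char *)&e.fixup_off);

        printf("insn ok:  %d\n", ex_insn(&e)  == (uintptr_t)&code[16]);
        printf("fixup ok: %d\n", ex_fixup(&e) == (uintptr_t)&code[32]);
        return 0;
    }
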
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
new file mode 100644
index 0000000..8dfb5f6
--- /dev/null
+++ b/arch/ia64/include/asm/atomic.h
@@ -0,0 +1,212 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ *
+ * NOTE: don't mess with the types below!  The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+
+
+#define ATOMIC_INIT(i)		{ (i) }
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic_read(v)		READ_ONCE((v)->counter)
+#define atomic64_read(v)	READ_ONCE((v)->counter)
+
+#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
+
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ int							\
+ia64_atomic_##op (int i, atomic_t *v)					\
+{									\
+	__s32 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return new;							\
+}
+
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
+
+#define atomic_add_return(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic_add(__ia64_aar_i, v);			\
+})
+
+#define atomic_sub_return(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);			\
+})
+
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ long							\
+ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
+{									\
+	__s64 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic64_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return new;							\
+}
+
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#define atomic64_add_return(i,v)					\
+({									\
+	long __ia64_aar_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic64_add(__ia64_aar_i, v);			\
+})
+
+#define atomic64_sub_return(i,v)					\
+({									\
+	long __ia64_asr_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic64_sub(__ia64_asr_i, v);			\
+})
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic64_cmpxchg(v, old, new) \
+	(cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c;
+}
+
+
+static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+	return atomic_add_return(i, v) < 0;
+}
+
+static __inline__ long
+atomic64_add_negative (__s64 i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+
+#endif /* _ASM_IA64_ATOMIC_H */
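
ATOMIC_OP() above builds each arithmetic/logical primitive out of a
compare-and-exchange retry loop: read the counter, compute "old c_op i", and
retry if another CPU changed the word in between. atomic_add_return() then
short-circuits the constants +/-1, 4, 8, 16 to ia64_fetch_and_add, since those
are the only immediates the fetchadd instruction accepts. A portable sketch of
the retry loop, with C11 atomics standing in for ia64_cmpxchg(acq, ...):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Sketch of the ATOMIC_OP() cmpxchg retry loop. */
    static int sketch_atomic_add_return(int i, _Atomic int *v)
    {
        int old, new;

        do {
            old = atomic_load(v);
            new = old + i;
            /* fails (and retries) if *v changed after the load */
        } while (!atomic_compare_exchange_strong(v, &old, new));
        return new;
    }

    int main(void)
    {
        _Atomic int counter = 40;

        printf("%d\n", sketch_atomic_add_return(2, &counter));  /* 42 */
        return 0;
    }
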
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
new file mode 100644
index 0000000..df896a1
--- /dev/null
+++ b/arch/ia64/include/asm/barrier.h
@@ -0,0 +1,88 @@
+/*
+ * Memory barrier definitions.  This is based on information published
+ * in the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_BARRIER_H
+#define _ASM_IA64_BARRIER_H
+
+#include <linux/compiler.h>
+
+/*
+ * Macros to force memory ordering.  In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ *   wmb():	Guarantees that all preceding stores to memory-
+ *		like regions are visible before any subsequent
+ *		stores and that all following stores will be
+ *		visible only after all previous stores.
+ *   rmb():	Like wmb(), but for reads.
+ *   mb():	wmb()/rmb() combo, i.e., all previous memory
+ *		accesses are visible before all subsequent
+ *		accesses and vice versa.  This is also known as
+ *		a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers.  For that, mf.a needs to
+ * be used.  However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb()		ia64_mf()
+#define rmb()		mb()
+#define wmb()		mb()
+
+#define dma_rmb()	mb()
+#define dma_wmb()	mb()
+
+#ifdef CONFIG_SMP
+# define smp_mb()	mb()
+#else
+# define smp_mb()	barrier()
+#endif
+
+#define smp_rmb()	smp_mb()
+#define smp_wmb()	smp_mb()
+
+#define read_barrier_depends()		do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
+
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq, so
+ * there is no need for asm trickery!
+ */
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+
+/*
+ * The group barrier in front of the rsm & ssm are necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+
+#endif /* _ASM_IA64_BARRIER_H */
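
smp_store_release()/smp_load_acquire() lean on the property noted above: ia64
compilers emit st.rel for volatile stores and ld.acq for volatile loads, so
WRITE_ONCE/READ_ONCE plus a compiler barrier already give release/acquire
ordering. The canonical use is message passing: write the data, then publish a
flag with release; the reader tests the flag with acquire before touching the
data. A hedged sketch with C11 atomics standing in for the kernel macros:

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;             /* plain data */
    static _Atomic int ready;       /* publication flag */

    static void publish(void)       /* smp_store_release() pattern */
    {
        payload = 42;
        atomic_store_explicit(&ready, 1, memory_order_release);
    }

    static int consume(void)        /* smp_load_acquire() pattern */
    {
        if (atomic_load_explicit(&ready, memory_order_acquire))
            return payload;         /* ordering guarantees we see 42 */
        return -1;                  /* not published yet */
    }

    int main(void)
    {
        publish();
        printf("%d\n", consume());  /* 42 */
        return 0;
    }
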
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
new file mode 100644
index 0000000..71e8145
--- /dev/null
+++ b/arch/ia64/include/asm/bitops.h
@@ -0,0 +1,456 @@
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
+ * O(1) scheduler patch
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered.  It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Similarly to clear_bit_unlock, the implementation uses a store
+ * with release semantics. See also arch_spin_unlock().
+ */
+static __inline__ void
+__clear_bit_unlock(int nr, void *addr)
+{
+	__u32 * const m = (__u32 *) addr + (nr >> 5);
+	__u32 const new = *m & ~(1 << (nr & 31));
+
+	ia64_st4_rel_nta(m, new);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__clear_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to toggle
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to toggle
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & bit) != 0;
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = (*p & m) != 0;
+
+	*p |= m;
+	return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & ~mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = (*p & m) != 0;
+
+	*p &= ~m;
+	return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & bit) != 0;
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+	__u32 old, bit = (1 << (nr & 31));
+	__u32 *m = (__u32 *) addr + (nr >> 5);
+
+	old = *m;
+	*m = old ^ bit;
+	return (old & bit) != 0;
+}
+
+static __inline__ int
+test_bit (int nr, const volatile void *addr)
+{
+	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/**
+ * ffz - find the first zero bit in a long word
+ * @x: The long word to find the bit in
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.
+ * Undefined if no zero exists, so code should check against ~0UL first...
+ */
+static inline unsigned long
+ffz (unsigned long x)
+{
+	unsigned long result;
+
+	result = ia64_popcnt(x & (~x - 1));
+	return result;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @x: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long
+__ffs (unsigned long x)
+{
+	unsigned long result;
+
+	result = ia64_popcnt((x-1) & ~x);
+	return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Return bit number of last (most-significant) bit set.  Undefined
+ * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+	long double d = x;
+	long exp;
+
+	exp = ia64_getf_exp(d);
+	return exp - 0xffff;
+}
+
+/*
+ * Find the last (most significant) bit set.  Returns 0 for x==0 and
+ * bits are numbered from 1..32 (e.g., fls(9) == 4).
+ */
+static inline int
+fls (int t)
+{
+	unsigned long x = t & 0xffffffffu;
+
+	if (!x)
+		return 0;
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+	return ia64_popcnt(x);
+}
+
+/*
+ * Find the last (most significant) bit set.  Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+	x |= x >> 32;
+	return ia64_popcnt(x) - 1;
+}
+
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+static __inline__ unsigned long __arch_hweight64(unsigned long x)
+{
+	unsigned long result;
+	result = ia64_popcnt(x);
+	return result;
+}
+
+#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
+#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
+#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
+
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
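
Several of the scan primitives above are popcount identities rather than
loops: ffz() isolates the trailing ones of x with "x & (~x - 1)" and counts
them, __ffs() counts trailing zeros via "(x - 1) & ~x", and __fls() smears the
highest set bit rightward and popcounts. A small verification sketch (assumes
64-bit unsigned long, as on ia64; __builtin_popcountl stands in for
ia64_popcnt):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long x = 0x9f;     /* 1001 1111: five trailing ones */
        unsigned long y = 0x50;     /* 0101 0000: four trailing zeros */
        unsigned long m = x;

        /* ffz(x): count of trailing ones = index of first zero bit */
        assert(__builtin_popcountl(x & (~x - 1)) == 5);

        /* __ffs(y): count of trailing zeros = index of first set bit */
        assert(__builtin_popcountl((y - 1) & ~y) == 4);

        /* __fls(x): smear the top bit right, popcount, subtract one */
        m |= m >> 1;  m |= m >> 2;  m |= m >> 4;
        m |= m >> 8;  m |= m >> 16; m |= m >> 32;
        assert(__builtin_popcountl(m) - 1 == 7);  /* bit 7 is highest */

        printf("ok\n");
        return 0;
    }
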
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
new file mode 100644
index 0000000..823616b
--- /dev/null
+++ b/arch/ia64/include/asm/bug.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_IA64_BUG_H
+#define _ASM_IA64_BUG_H
+
+#ifdef CONFIG_BUG
+#define ia64_abort()	__builtin_trap()
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+
+/* should this BUG be made generic? */
+#define HAVE_ARCH_BUG
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h
new file mode 100644
index 0000000..433523e
--- /dev/null
+++ b/arch/ia64/include/asm/bugs.h
@@ -0,0 +1,19 @@
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ *
+ * Based on <asm-alpha/bugs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>,  Hewlett-Packard Co.
+ */
+#ifndef _ASM_IA64_BUGS_H
+#define _ASM_IA64_BUGS_H
+
+#include <asm/processor.h>
+
+extern void check_bugs (void);
+
+#endif /* _ASM_IA64_BUGS_H */
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
new file mode 100644
index 0000000..988254a
--- /dev/null
+++ b/arch/ia64/include/asm/cache.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* Bytes per L1 (data) cache line.  */
+#define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#ifdef CONFIG_SMP
+# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
+# define SMP_CACHE_BYTES	L1_CACHE_BYTES
+#else
+  /*
+   * The "aligned" directive can only _increase_ alignment, so this is
+   * safe and provides an easy way to avoid wasting space on a
+   * uni-processor:
+   */
+# define SMP_CACHE_SHIFT	3
+# define SMP_CACHE_BYTES	(1 << 3)
+#endif
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
new file mode 100644
index 0000000..429eefc
--- /dev/null
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_IA64_CACHEFLUSH_H
+#define _ASM_IA64_CACHEFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/page-flags.h>
+#include <linux/bitops.h>
+
+#include <asm/page.h>
+
+/*
+ * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
+ * to avoid them whenever possible.
+ */
+
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_icache_page(vma,page)		do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+#define flush_dcache_page(page)			\
+do {						\
+	clear_bit(PG_arch_1, &(page)->flags);	\
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void clflush_cache_range(void *addr, int size);
+
+
+#define flush_icache_user_range(vma, page, user_addr, len)					\
+do {												\
+	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
+	flush_icache_range(_addr, _addr + (len));						\
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+     flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* _ASM_IA64_CACHEFLUSH_H */
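
flush_icache_user_range() above works on the kernel's own alias of the user
page: it keeps the offset within the page (user_addr & ~PAGE_MASK) and adds it
to page_address(page), then flushes len bytes there. A minimal sketch of that
address arithmetic (all values made up; 16K pages chosen as a typical ia64
configuration):

    #include <stdio.h>

    #define PAGE_SHIFT  14                  /* 16K pages */
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_MASK   (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long user_addr   = 0x200000000000010cUL; /* made-up user VA */
        unsigned long kernel_page = 0xe000000012340000UL; /* made-up page_address() */

        /* same offset within the page, kernel alias of the frame */
        unsigned long kaddr = kernel_page + (user_addr & ~PAGE_MASK);

        printf("flush at %#lx\n", kaddr);   /* 0xe00000001234010c */
        return 0;
    }
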
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
new file mode 100644
index 0000000..97af155
--- /dev/null
+++ b/arch/ia64/include/asm/checksum.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern __sum16 csum_tcpudp_magic (__be32 saddr, __be32 daddr,
+					     unsigned short len,
+					     unsigned short proto,
+					     __wsum sum);
+
+extern __wsum csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or
+ * even better 64-bit) boundary.
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						 int len, __wsum sum,
+						 int *errp);
+
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					       int len, __wsum sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16);
+	sum = (sum & 0xffff) + (sum >> 16);
+	return (__force __sum16)~sum;
+}
+
+#define _HAVE_ARCH_IPV6_CSUM	1
+struct in6_addr;
+extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+	const struct in6_addr *daddr, __u32 len, unsigned short proto,
+	__wsum csum);
+
+#endif /* _ASM_IA64_CHECKSUM_H */
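
csum_fold() above collapses a 32-bit running sum into the final 16-bit
ones'-complement checksum: add the high and low halves, add again to absorb a
carry from the first add, then invert. A standalone sketch of the fold applied
to a hand-summed buffer (illustrative values; byte-order handling is glossed
over):

    #include <stdint.h>
    #include <stdio.h>

    /* csum_fold() logic: 32-bit partial sum -> 16-bit checksum */
    static uint16_t fold(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);   /* fold high half in */
        sum = (sum & 0xffff) + (sum >> 16);   /* absorb the carry */
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* Sum 16-bit words the way csum_partial() accumulates them. */
        uint16_t words[] = { 0x4500, 0x0054, 0xabcd, 0x0000 };
        uint32_t sum = 0;

        for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
            sum += words[i];

        printf("checksum = %#06x\n", fold(sum));
        return 0;
    }
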
diff --git a/arch/ia64/include/asm/clocksource.h b/arch/ia64/include/asm/clocksource.h
new file mode 100644
index 0000000..5c8596e
--- /dev/null
+++ b/arch/ia64/include/asm/clocksource.h
@@ -0,0 +1,10 @@
+/* IA64-specific clocksource additions */
+
+#ifndef _ASM_IA64_CLOCKSOURCE_H
+#define _ASM_IA64_CLOCKSOURCE_H
+
+struct arch_clocksource_data {
+	void *fsys_mmio;        /* used by fsyscall asm code */
+};
+
+#endif /* _ASM_IA64_CLOCKSOURCE_H */
diff --git a/arch/ia64/include/asm/cpu.h b/arch/ia64/include/asm/cpu.h
new file mode 100644
index 0000000..fcca30b
--- /dev/null
+++ b/arch/ia64/include/asm/cpu.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_CPU_H_
+#define _ASM_IA64_CPU_H_
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/percpu.h>
+
+struct ia64_cpu {
+	struct cpu cpu;
+};
+
+DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
+
+DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
+extern void arch_unregister_cpu(int);
+#endif
+
+#endif /* _ASM_IA64_CPU_H_ */
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
new file mode 100644
index 0000000..e2d3f5b
--- /dev/null
+++ b/arch/ia64/include/asm/cputime.h
@@ -0,0 +1,29 @@
+/*
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
+#ifndef __IA64_CPUTIME_H
+#define __IA64_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
+#else
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+#endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/current.h b/arch/ia64/include/asm/current.h
new file mode 100644
index 0000000..c659f90
--- /dev/null
+++ b/arch/ia64/include/asm/current.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Modified 1998-2000
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current	((struct task_struct *) ia64_getreg(_IA64_REG_TP))
+
+#endif /* _ASM_IA64_CURRENT_H */
diff --git a/arch/ia64/include/asm/cyclone.h b/arch/ia64/include/asm/cyclone.h
new file mode 100644
index 0000000..88f6500
--- /dev/null
+++ b/arch/ia64/include/asm/cyclone.h
@@ -0,0 +1,15 @@
+#ifndef ASM_IA64_CYCLONE_H
+#define ASM_IA64_CYCLONE_H
+
+#ifdef	CONFIG_IA64_CYCLONE
+extern int use_cyclone;
+extern void __init cyclone_setup(void);
+#else	/* CONFIG_IA64_CYCLONE */
+#define use_cyclone 0
+static inline void cyclone_setup(void)
+{
+	printk(KERN_ERR "Cyclone Counter: System not configured"
+					" w/ CONFIG_IA64_CYCLONE.\n");
+}
+#endif	/* CONFIG_IA64_CYCLONE */
+#endif	/* !ASM_IA64_CYCLONE_H */
diff --git a/arch/ia64/include/asm/delay.h b/arch/ia64/include/asm/delay.h
new file mode 100644
index 0000000..a30a62f
--- /dev/null
+++ b/arch/ia64/include/asm/delay.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+static __inline__ void
+ia64_set_itm (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_ITM, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itm (void)
+{
+	unsigned long result;
+
+	result = ia64_getreg(_IA64_REG_CR_ITM);
+	ia64_srlz_d();
+	return result;
+}
+
+static __inline__ void
+ia64_set_itv (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_ITV, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itv (void)
+{
+	return ia64_getreg(_IA64_REG_CR_ITV);
+}
+
+static __inline__ void
+ia64_set_itc (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_AR_ITC, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itc (void)
+{
+	unsigned long result;
+
+	result = ia64_getreg(_IA64_REG_AR_ITC);
+	ia64_barrier();
+#ifdef CONFIG_ITANIUM
+	while (unlikely((__s32) result == -1)) {
+		result = ia64_getreg(_IA64_REG_AR_ITC);
+		ia64_barrier();
+	}
+#endif
+	return result;
+}
+
+extern void ia64_delay_loop (unsigned long loops);
+
+static __inline__ void
+__delay (unsigned long loops)
+{
+	if (unlikely(loops < 1))
+		return;
+
+	ia64_delay_loop (loops - 1);
+}
+
+extern void udelay (unsigned long usecs);
+
+#endif /* _ASM_IA64_DELAY_H */
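
ia64_get_itc() above reads the free-running interval time counter (ar.itc);
the CONFIG_ITANIUM retry loop works around an erratum where a read can
transiently return -1 while a carry propagates into the upper half. A delay
built on such a counter is just "spin until ITC passes start + usecs *
cycles-per-usec"; the kernel's udelay() scales by the measured CPU frequency,
whereas this hedged sketch fakes both the counter and the scale factor:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake free-running counter standing in for ia64_get_itc(). */
    static uint64_t fake_itc;
    static uint64_t read_cycle_counter(void)
    {
        return fake_itc += 50;      /* pretend time advances */
    }

    #define CYC_PER_USEC    400UL   /* made-up 400 MHz ITC */

    static void sketch_udelay(unsigned long usecs)
    {
        uint64_t start  = read_cycle_counter();
        uint64_t cycles = usecs * CYC_PER_USEC;

        /* unsigned subtraction keeps this correct across wraparound */
        while (read_cycle_counter() - start < cycles)
            ;
    }

    int main(void)
    {
        sketch_udelay(10);
        printf("spun for ~%lu fake cycles\n", (unsigned long)fake_itc);
        return 0;
    }
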
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
new file mode 100644
index 0000000..f69c32f
--- /dev/null
+++ b/arch/ia64/include/asm/device.h
@@ -0,0 +1,18 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_IA64_DEVICE_H
+#define _ASM_IA64_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_INTEL_IOMMU
+	void *iommu; /* hook for IOMMU specific extension */
+#endif
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/div64.h b/arch/ia64/include/asm/div64.h
new file mode 100644
index 0000000..6cd978c
--- /dev/null
+++ b/arch/ia64/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
new file mode 100644
index 0000000..9beccf8
--- /dev/null
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <asm/machvec.h>
+#include <linux/scatterlist.h>
+#include <asm/swiotlb.h>
+#include <linux/dma-debug.h>
+
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
+#define DMA_ERROR_CODE 0
+
+extern struct dma_map_ops *dma_ops;
+extern struct ia64_machine_vector ia64_mv;
+extern void set_iommu_machvec(void);
+
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
+
+#define get_dma_ops(dev) platform_dma_get_ops(dev)
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
+static inline void
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/*
+	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
+	 * ensure that dma_cache_sync() enforces order, hence the mb().
+	 */
+	mb();
+}
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
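
Because phys_to_dma()/dma_to_phys() are the identity here, dma_capable()
reduces to a pure mask test: a device can reach a buffer iff the buffer's last
byte fits under its dma_mask. A small sketch of the boundary behaviour:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the dma_capable() mask test. */
    static bool capable(uint64_t dma_mask, uint64_t addr, uint64_t size)
    {
        return addr + size - 1 <= dma_mask;
    }

    int main(void)
    {
        uint64_t mask32 = 0xffffffffULL;    /* 32-bit-only device */

        printf("%d\n", capable(mask32, 0xfffff000ULL, 0x1000)); /* 1: ends at 4G-1 */
        printf("%d\n", capable(mask32, 0xfffff001ULL, 0x1000)); /* 0: crosses 4G */
        return 0;
    }
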
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
new file mode 100644
index 0000000..4d97f60
--- /dev/null
+++ b/arch/ia64/include/asm/dma.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/io.h>		/* need byte IO */
+
+extern unsigned long MAX_DMA_ADDRESS;
+
+#ifdef CONFIG_PCI
+  extern int isa_dma_bridge_buggy;
+#else
+# define isa_dma_bridge_buggy 	(0)
+#endif
+
+#define free_dma(x)
+
+void dma_mark_clean(void *addr, size_t size);
+
+#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
new file mode 100644
index 0000000..f365a61
--- /dev/null
+++ b/arch/ia64/include/asm/dmi.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <linux/slab.h>
+#include <asm/io.h>
+
+/* Use normal IO mappings for DMI */
+#define dmi_early_remap		ioremap
+#define dmi_early_unmap(x, l)	iounmap(x)
+#define dmi_remap		ioremap
+#define dmi_unmap		iounmap
+#define dmi_alloc(l)		kzalloc(l, GFP_ATOMIC)
+
+#endif
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
new file mode 100644
index 0000000..5a83c5c
--- /dev/null
+++ b/arch/ia64/include/asm/elf.h
@@ -0,0 +1,234 @@
+#ifndef _ASM_IA64_ELF_H
+#define _ASM_IA64_ELF_H
+
+/*
+ * ELF-specific definitions.
+ *
+ * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/auxvec.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS64
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_IA_64
+
+#define CORE_DUMP_USE_REGSET
+
+/* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
+   interpreted as follows by Linux: */
+#define EF_IA_64_LINUX_EXECUTABLE_STACK	0x1	/* is stack (& heap) executable by default? */
+
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader.  We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)
+
+#define PT_IA_64_UNWIND		0x70000001
+
+/* IA-64 relocations: */
+#define R_IA64_NONE		0x00	/* none */
+#define R_IA64_IMM14		0x21	/* symbol + addend, add imm14 */
+#define R_IA64_IMM22		0x22	/* symbol + addend, add imm22 */
+#define R_IA64_IMM64		0x23	/* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB		0x24	/* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB		0x25	/* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB		0x26	/* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB		0x27	/* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22		0x2a	/* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I		0x2b	/* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB	0x2c	/* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB	0x2d	/* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB	0x2e	/* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB	0x2f	/* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22		0x32	/* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I		0x33	/* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22		0x3a	/* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I	0x3b	/* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB	0x3e	/* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB	0x3f	/* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I		0x43	/* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB	0x44	/* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB	0x45	/* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB	0x46	/* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB	0x47	/* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B		0x48	/* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B		0x49	/* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M		0x4a	/* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F		0x4b	/* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB	0x4c	/* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB	0x4d	/* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB	0x4e	/* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB	0x4f	/* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22	0x52	/* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I	0x53	/* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB	0x54	/* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB	0x55	/* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB	0x56	/* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB	0x57	/* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB	0x5c	/* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB	0x5d	/* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB	0x5e	/* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB	0x5f	/* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB	0x64	/* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB	0x65	/* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB	0x66	/* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB	0x67	/* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB		0x6c	/* data 4 + REL */
+#define R_IA64_REL32LSB		0x6d	/* data 4 + REL */
+#define R_IA64_REL64MSB		0x6e	/* data 8 + REL */
+#define R_IA64_REL64LSB		0x6f	/* data 8 + REL */
+#define R_IA64_LTV32MSB		0x74	/* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB		0x75	/* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB		0x76	/* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB		0x77	/* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI	0x79	/* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22		0x7a	/* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I		0x7b	/* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB		0x80	/* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB		0x81	/* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY		0x84	/* dynamic reloc, data copy */
+#define R_IA64_SUB		0x85	/* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X		0x86	/* LTOFF22, relaxable.  */
+#define R_IA64_LDXMOV		0x87	/* Use of LTOFF22X.  */
+#define R_IA64_TPREL14		0x91	/* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22		0x92	/* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I		0x93	/* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB	0x96	/* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB	0x97	/* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22	0x9a	/* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB	0xa6	/* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB	0xa7	/* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22	0xaa	/* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14		0xb1	/* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22		0xb2	/* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I	0xb3	/* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB	0xb4	/* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB	0xb5	/* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB	0xb6	/* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB	0xb7	/* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22	0xba	/* @ltoff(@dtprel(s+a)), imm22 */
+
+/* IA-64 specific section flags: */
+#define SHF_IA_64_SHORT		0x10000000	/* section near gp */
+
+/*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+ * used to map the register backing store.  I don't see any better
+ * place to do this, but we should discuss this with Linus once we can
+ * talk to him...
+ */
+extern void ia64_init_addr_space (void);
+#define ELF_PLAT_INIT(_r, load_addr)	ia64_init_addr_space()
+
+/* ELF register definitions.  This is needed for core dump support.  */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ *	r0-r31
+ *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+ *	predicate registers (p0-p63)
+ *	b0-b7
+ *	ip cfm psr
+ *	ar.rsc ar.bsp ar.bspstore ar.rnat
+ *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+ */
+#define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
+#define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
+
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET     0
+#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))
+
+typedef unsigned long elf_fpxregset_t;
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia64_fpreg elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+
+
+struct pt_regs;	/* forward declaration... */
+extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
+#define ELF_CORE_COPY_REGS(_dest,_regs)	ia64_elf_core_copy_regs(_regs, _dest);
+
+/* This macro yields a bitmask that programs can use to figure out
+   what instruction set this CPU supports.  */
+#define ELF_HWCAP 	0
+
+/* This macro yields a string that ld.so will use to load
+   implementation specific libraries for optimization.  Not terribly
+   relevant until we have real hardware to play with... */
+#define ELF_PLATFORM	NULL
+
+#define elf_read_implies_exec(ex, executable_stack)					\
+	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+
+struct task_struct;
+
+#define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO								\
+do {										\
+	extern char __kernel_syscall_via_epc[];					\
+	NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc);	\
+	NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR);		\
+} while (0)
+
+/*
+ * format for entries in the Global Offset Table
+ */
+struct got_entry {
+	uint64_t val;
+};
+
+/*
+ * Layout of the Function Descriptor
+ */
+struct fdesc {
+	uint64_t ip;
+	uint64_t gp;
+};
+
+#endif /* _ASM_IA64_ELF_H */
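The offset macros above pin down the byte layout of elf_gregset_t for core
dumps.  As a minimal sketch of how a dumper would use them (the helper is
hypothetical; the real copying is done by ia64_elf_core_copy_regs()):

	/* Store a captured ar.bsp value into its elf_gregset_t slot. */
	static void store_ar_bsp(elf_gregset_t dst, unsigned long ar_bsp)
	{
		dst[ELF_AR_BSP_OFFSET / sizeof(elf_greg_t)] = ar_bsp;
	}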
diff --git a/arch/ia64/include/asm/emergency-restart.h b/arch/ia64/include/asm/emergency-restart.h
new file mode 100644
index 0000000..108d8c4
--- /dev/null
+++ b/arch/ia64/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/ia64/include/asm/esi.h b/arch/ia64/include/asm/esi.h
new file mode 100644
index 0000000..40991c6
--- /dev/null
+++ b/arch/ia64/include/asm/esi.h
@@ -0,0 +1,29 @@
+/*
+ * ESI service calls.
+ *
+ * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+ * 	Alex Williamson <alex.williamson@hp.com>
+ */
+#ifndef esi_h
+#define esi_h
+
+#include <linux/efi.h>
+
+#define ESI_QUERY			0x00000001
+#define ESI_OPEN_HANDLE			0x02000000
+#define ESI_CLOSE_HANDLE		0x02000001
+
+enum esi_proc_type {
+	ESI_PROC_SERIALIZED,	/* calls need to be serialized */
+	ESI_PROC_MP_SAFE,	/* MP-safe, but not reentrant */
+	ESI_PROC_REENTRANT	/* MP-safe and reentrant */
+};
+
+extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
+extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
+			 enum esi_proc_type,
+			 u64, u64, u64, u64, u64, u64, u64, u64);
+extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
+                              u64, u64, u64, u64, u64, u64);
+
+#endif /* esi_h */
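A hypothetical usage sketch (the GUID and the meaning of the trailing
arguments are assumed for illustration; the real callers live next to the
implementation in arch/ia64/kernel/esi.c):

	/* Issue a serialized ESI_QUERY call for the given vendor GUID. */
	static int query_esi(efi_guid_t guid, struct ia64_sal_retval *isrv)
	{
		return ia64_esi_call(guid, isrv, ESI_PROC_SERIALIZED,
				     ESI_QUERY, 0, 0, 0, 0, 0, 0, 0);
	}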
diff --git a/arch/ia64/include/asm/fb.h b/arch/ia64/include/asm/fb.h
new file mode 100644
index 0000000..89a397c
--- /dev/null
+++ b/arch/ia64/include/asm/fb.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <linux/efi.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/ia64/include/asm/fpswa.h b/arch/ia64/include/asm/fpswa.h
new file mode 100644
index 0000000..62edfce
--- /dev/null
+++ b/arch/ia64/include/asm/fpswa.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_IA64_FPSWA_H
+#define _ASM_IA64_FPSWA_H
+
+/*
+ * Floating-point Software Assist
+ *
+ * Copyright (C) 1999 Intel Corporation.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
+ */
+
+typedef struct {
+	/* 4 * 128 bits */
+	unsigned long fp_lp[4*2];
+} fp_state_low_preserved_t;
+
+typedef struct {
+	/* 10 * 128 bits */
+	unsigned long fp_lv[10 * 2];
+} fp_state_low_volatile_t;
+
+typedef	struct {
+	/* 16 * 128 bits */
+	unsigned long fp_hp[16 * 2];
+} fp_state_high_preserved_t;
+
+typedef struct {
+	/* 96 * 128 bits */
+	unsigned long fp_hv[96 * 2];
+} fp_state_high_volatile_t;
+
+/**
+ * floating point state to be passed to the FP emulation library by
+ * the trap/fault handler
+ */
+typedef struct {
+	unsigned long			bitmask_low64;
+	unsigned long			bitmask_high64;
+	fp_state_low_preserved_t	*fp_state_low_preserved;
+	fp_state_low_volatile_t		*fp_state_low_volatile;
+	fp_state_high_preserved_t	*fp_state_high_preserved;
+	fp_state_high_volatile_t	*fp_state_high_volatile;
+} fp_state_t;
+
+typedef struct {
+	unsigned long status;
+	unsigned long err0;
+	unsigned long err1;
+	unsigned long err2;
+} fpswa_ret_t;
+
+/**
+ * function header for the Floating Point software assist
+ * library. This function is invoked by the Floating point software
+ * assist trap/fault handler.
+ */
+typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
+				    unsigned long *fsr, unsigned long *isr, unsigned long *preds,
+				    unsigned long *ifs, fp_state_t *fp_state);
+
+/**
+ * This is the FPSWA library interface as defined by EFI.  We need to pass a
+ * pointer to the interface itself on a call to the assist library.
+ */
+typedef struct {
+	unsigned int	 revision;
+	unsigned int	 reserved;
+	efi_fpswa_t	 fpswa;
+} fpswa_interface_t;
+
+extern fpswa_interface_t *fpswa_interface;
+
+#endif /* _ASM_IA64_FPSWA_H */
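A sketch of how the fault handler dispatches through this interface (kernel
context assumed; every argument is a placeholder for state captured at the
fault):

	static fpswa_ret_t call_fpswa(unsigned long trap_type, void *bundle,
				      unsigned long *ipsr, unsigned long *fsr,
				      unsigned long *isr, unsigned long *preds,
				      unsigned long *ifs, fp_state_t *fps)
	{
		/* Indirect call through the EFI-provided entry point. */
		return fpswa_interface->fpswa(trap_type, bundle, ipsr, fsr,
					      isr, preds, ifs, fps);
	}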
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
new file mode 100644
index 0000000..fbd1a24
--- /dev/null
+++ b/arch/ia64/include/asm/ftrace.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_FTRACE_H
+#define _ASM_IA64_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE        32 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
+#define mcount _mcount
+
+/* In IA64, MCOUNT_ADDR is set at link time, so it's not a constant at compile time */
+#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
+#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* second bundle, insn 2 */
+	return addr - 0x12;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_IA64_FTRACE_H */
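On ia64 a C function pointer designates a function descriptor (an {ip, gp}
pair) rather than the code itself, which is why MCOUNT_ADDR and FTRACE_ADDR
dereference through struct fnptr.  A minimal illustration (the traced
function is hypothetical):

	extern void some_traced_func(void);

	/* Recover the entry IP hidden behind an ia64 function pointer. */
	static unsigned long entry_ip(void)
	{
		return ((struct fnptr *) some_traced_func)->ip;
	}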
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
new file mode 100644
index 0000000..76acbcd
--- /dev/null
+++ b/arch/ia64/include/asm/futex.h
@@ -0,0 +1,126 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+do {									\
+	register unsigned long r8 __asm ("r8") = 0;			\
+	__asm__ __volatile__(						\
+		"	mf;;					\n"	\
+		"[1:] "	insn ";;				\n"	\
+		"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"	\
+		"[2:]"							\
+		: "+r" (r8), "=r" (oldval)				\
+		: "r" (uaddr), "r" (oparg)				\
+		: "memory");						\
+	ret = r8;							\
+} while (0)
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+do {									\
+	register unsigned long r8 __asm ("r8") = 0;			\
+	int val, newval;						\
+	do {								\
+		__asm__ __volatile__(					\
+			"	mf;;				  \n"	\
+			"[1:]	ld4 %3=[%4];;			  \n"	\
+			"	mov %2=%3			  \n"	\
+				insn	";;			  \n"	\
+			"	mov ar.ccv=%2;;			  \n"	\
+			"[2:]	cmpxchg4.acq %1=[%4],%3,ar.ccv;;  \n"	\
+			"	.xdata4 \"__ex_table\", 1b-., 3f-.\n"	\
+			"	.xdata4 \"__ex_table\", 2b-., 3f-.\n"	\
+			"[3:]"						\
+			: "+r" (r8), "=r" (val), "=&r" (oldval),	\
+			   "=&r" (newval)				\
+			: "r" (uaddr), "r" (oparg)			\
+			: "memory");					\
+		if (unlikely (r8))					\
+			break;						\
+	} while (unlikely (val != oldval));				\
+	ret = r8;							\
+} while (0)
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
+				   oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
+				   ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	{
+		register unsigned long r8 __asm ("r8") = 0;
+		unsigned long prev;
+		__asm__ __volatile__(
+			"	mf;;					\n"
+			"	mov ar.ccv=%4;;				\n"
+			"[1:]	cmpxchg4.acq %1=[%2],%3,ar.ccv		\n"
+			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
+			"[2:]"
+			: "+r" (r8), "=&r" (prev)
+			: "r" (uaddr), "r" (newval),
+			  "rO" ((long) (unsigned) oldval)
+			: "memory");
+		*uval = prev;
+		return r8;
+	}
+}
+
+#endif /* _ASM_FUTEX_H */
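The encoded_op unpacking above can be checked in isolation; a self-contained
userspace illustration with hypothetical field values (FUTEX_OP_ADD is 1 and
FUTEX_OP_CMP_LT is 2 in <linux/futex.h>):

	#include <stdio.h>

	int main(void)
	{
		int encoded_op = (1 << 28) | (2 << 24) | (16 << 12) | 7;
		int op = (encoded_op >> 28) & 7;	/* 1: FUTEX_OP_ADD */
		int cmp = (encoded_op >> 24) & 15;	/* 2: FUTEX_OP_CMP_LT */
		int oparg = (encoded_op << 8) >> 20;	/* sign-extended: 16 */
		int cmparg = (encoded_op << 20) >> 20;	/* sign-extended: 7 */

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
		       op, cmp, oparg, cmparg);
		return 0;
	}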
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
new file mode 100644
index 0000000..f9495b1
--- /dev/null
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -0,0 +1,12 @@
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+
+#include <uapi/asm/gcc_intrin.h>
+
+register unsigned long ia64_r13 asm ("r13") __used;
+#endif /* _ASM_IA64_GCC_INTRIN_H */
diff --git a/arch/ia64/include/asm/gpio.h b/arch/ia64/include/asm/gpio.h
new file mode 100644
index 0000000..b3799d8
--- /dev/null
+++ b/arch/ia64/include/asm/gpio.h
@@ -0,0 +1,4 @@
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
new file mode 100644
index 0000000..8fb7d33
--- /dev/null
+++ b/arch/ia64/include/asm/hardirq.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_IA64_HARDIRQ_H
+#define _ASM_IA64_HARDIRQ_H
+
+/*
+ * Modified 1998-2002, 2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
+ */
+
+#define __ARCH_IRQ_STAT	1
+
+#define local_softirq_pending()		(local_cpu_data->softirq_pending)
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
+extern void __iomem *ipi_base_addr;
+
+void ack_bad_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/arch/ia64/include/asm/hpsim.h b/arch/ia64/include/asm/hpsim.h
new file mode 100644
index 0000000..0fe5022
--- /dev/null
+++ b/arch/ia64/include/asm/hpsim.h
@@ -0,0 +1,16 @@
+#ifndef _ASMIA64_HPSIM_H
+#define _ASMIA64_HPSIM_H
+
+#ifndef CONFIG_HP_SIMSERIAL_CONSOLE
+static inline int simcons_register(void) { return 1; }
+#else
+int simcons_register(void);
+#endif
+
+struct tty_driver;
+extern struct tty_driver *hp_simserial_driver;
+
+extern int hpsim_get_irq(int intr);
+void ia64_ctl_trace(long on);
+
+#endif
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
new file mode 100644
index 0000000..ef65f02
--- /dev/null
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -0,0 +1,72 @@
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
+
+int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len)
+{
+	return (REGION_NUMBER(addr) == RGN_HPAGE ||
+		REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_IA64_HUGETLB_H */
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
new file mode 100644
index 0000000..74347eb
--- /dev/null
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -0,0 +1,186 @@
+#ifndef _ASM_IA64_HW_IRQ_H
+#define _ASM_IA64_HW_IRQ_H
+
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+
+#include <asm/machvec.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+typedef u8 ia64_vector;
+
+/*
+ * Interrupt vector usage:
+ *
+ * 0 is special
+ *
+ * 1,3-14 are reserved by firmware
+ *
+ * 15 is the spurious-interrupt vector (see IVR)
+ *
+ * 16-255 (vectored external interrupts) are available; 16 is the
+ * lowest priority and 255 the highest, forming 15 priority classes
+ * of 16 interrupts each.
+ */
+#define IA64_MIN_VECTORED_IRQ		 16
+#define IA64_MAX_VECTORED_IRQ		255
+#define IA64_NUM_VECTORS		256
+
+#define AUTO_ASSIGN			-1
+
+#define IA64_SPURIOUS_INT_VECTOR	0x0f
+
+/*
+ * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
+ */
+#define IA64_CPEP_VECTOR		0x1c	/* corrected platform error polling vector */
+#define IA64_CMCP_VECTOR		0x1d	/* corrected machine-check polling vector */
+#define IA64_CPE_VECTOR			0x1e	/* corrected platform error interrupt vector */
+#define IA64_CMC_VECTOR			0x1f	/* corrected machine-check interrupt vector */
+/*
+ * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ * Use vectors 0x30-0xe7 as the default device vector range for ia64.
+ * Platforms may choose to reduce this range in platform_irq_setup, but the
+ * platform range must fall within
+ *	[IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
+ */
+extern int ia64_first_device_vector;
+extern int ia64_last_device_vector;
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve a vector of lower priority than the device vectors for the "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
+#else
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
+#endif
+#define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
+#define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
+#define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
+#define IA64_MAX_DEVICE_VECTORS		(IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
+#define IA64_NUM_DEVICE_VECTORS		(IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
+
+#define IA64_MCA_RENDEZ_VECTOR		0xe8	/* MCA rendez interrupt */
+#define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */
+#define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
+#define	IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_LOCAL_TLB_FLUSH	0xfc	/* SMP flush local TLB */
+#define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
+#define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */
+
+/* Used for encoding redirected irqs */
+
+#define IA64_IRQ_REDIRECTED		(1 << 31)
+
+/* IA64 inter-cpu interrupt related definitions */
+
+#define IA64_IPI_DEFAULT_BASE_ADDR	0xfee00000
+
+/* Delivery modes for inter-cpu interrupts */
+enum {
+        IA64_IPI_DM_INT =       0x0,    /* pend an external interrupt */
+        IA64_IPI_DM_PMI =       0x2,    /* pend a PMI */
+        IA64_IPI_DM_NMI =       0x4,    /* pend an NMI (vector 2) */
+        IA64_IPI_DM_INIT =      0x5,    /* pend an INIT interrupt */
+        IA64_IPI_DM_EXTINT =    0x7,    /* pend an 8259-compatible interrupt. */
+};
+
+extern __u8 isa_irq_to_vector_map[16];
+#define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]
+
+struct irq_cfg {
+	ia64_vector vector;
+	cpumask_t domain;
+	cpumask_t old_domain;
+	unsigned move_cleanup_count;
+	u8 move_in_progress : 1;
+};
+extern spinlock_t vector_lock;
+extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x)	irq_cfg[(x)].domain
+DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
+
+extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
+
+#define ia64_register_ipi	ia64_native_register_ipi
+#define assign_irq_vector	ia64_native_assign_irq_vector
+#define free_irq_vector		ia64_native_free_irq_vector
+#define register_percpu_irq	ia64_native_register_percpu_irq
+#define ia64_resend_irq		ia64_native_resend_irq
+
+extern void ia64_native_register_ipi(void);
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
+extern int ia64_native_assign_irq_vector (int irq);	/* allocate a free vector */
+extern void ia64_native_free_irq_vector (int vector);
+extern int reserve_irq_vector (int vector);
+extern void __setup_vector_irq(int cpu);
+extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
+extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
+extern void destroy_and_reserve_irq (unsigned int irq);
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
+static inline void ia64_native_resend_irq(unsigned int vector)
+{
+	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Default implementations for the irq-descriptor API:
+ */
+#ifndef CONFIG_IA64_GENERIC
+static inline ia64_vector __ia64_irq_to_vector(int irq)
+{
+	return irq_cfg[irq].vector;
+}
+
+static inline unsigned int
+__ia64_local_vector_to_irq (ia64_vector vec)
+{
+	return __this_cpu_read(vector_irq[vec]);
+}
+#endif
+
+/*
+ * Next follows the irq descriptor interface.  On IA-64, each CPU supports 256 interrupt
+ * vectors.  On smaller systems, there is a one-to-one correspondence between interrupt
+ * vectors and the Linux irq numbers.  However, larger systems may have multiple interrupt
+ * domains meaning that the translation from vector number to irq number depends on the
+ * interrupt domain that a CPU belongs to.  This API abstracts such platform-dependent
+ * differences and provides a uniform means to translate between vector and irq numbers
+ * and to obtain the irq descriptor for a given irq number.
+ */
+
+/* Extract the IA-64 vector that corresponds to IRQ.  */
+static inline ia64_vector
+irq_to_vector (int irq)
+{
+	return platform_irq_to_vector(irq);
+}
+
+/*
+ * Convert the local IA-64 vector to the corresponding irq number.  This translation is
+ * done in the context of the interrupt domain that the currently executing CPU belongs
+ * to.
+ */
+static inline unsigned int
+local_vector_to_irq (ia64_vector vec)
+{
+	return platform_local_vector_to_irq(vec);
+}
+
+#endif /* _ASM_IA64_HW_IRQ_H */
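A sketch of the vector/irq translation API in use (kernel context assumed;
the diagnostic itself is hypothetical):

	static void report_irq_mapping(int irq)
	{
		ia64_vector vec = irq_to_vector(irq);

		/* On multi-domain systems the reverse mapping is per CPU. */
		printk(KERN_INFO "irq %d -> vector 0x%x -> irq %u\n",
		       irq, vec, local_vector_to_irq(vec));
	}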
diff --git a/arch/ia64/include/asm/idle.h b/arch/ia64/include/asm/idle.h
new file mode 100644
index 0000000..b768501
--- /dev/null
+++ b/arch/ia64/include/asm/idle.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_IA64_IDLE_H
+#define _ASM_IA64_IDLE_H
+
+static inline void enter_idle(void) { }
+static inline void exit_idle(void) { }
+
+#endif /* _ASM_IA64_IDLE_H */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
new file mode 100644
index 0000000..ec970a9
--- /dev/null
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -0,0 +1,12 @@
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_INTRINSICS_H
+#define _ASM_IA64_INTRINSICS_H
+
+#include <uapi/asm/intrinsics.h>
+
+#endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
new file mode 100644
index 0000000..8fdb9c7
--- /dev/null
+++ b/arch/ia64/include/asm/io.h
@@ -0,0 +1,451 @@
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that gcc can optimize as well as possible
+ * and (b) avoid writing the same thing over and over again with slight
+ * variations, possibly making a mistake somewhere.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <asm/unaligned.h>
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO	do { } while (0)
+#define SLOW_DOWN_IO	do { } while (0)
+
+#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
+
+/*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+ * large machines may have multiple other I/O spaces so we can't place any a priori limit
+ * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
+ */
+#define IO_SPACE_LIMIT		0xffffffffffffffffUL
+
+#define MAX_IO_SPACES_BITS		8
+#define MAX_IO_SPACES			(1UL << MAX_IO_SPACES_BITS)
+#define IO_SPACE_BITS			24
+#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space)		((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
+
+struct io_space {
+	unsigned long mmio_base;	/* base in MMIO space */
+	int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
+# ifdef __KERNEL__
+
+/*
+ * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
+ *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
+ *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
+ *
+ * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
+ * code that uses bare port numbers without the prerequisite pci_iomap().
+ */
+#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
+#define PIO_MASK		(PIO_OFFSET - 1)
+#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
+#define HAVE_ARCH_PIO_SIZE
+
+#include <asm/intrinsics.h>
+#include <asm/machvec.h>
+#include <asm/page.h>
+#include <asm-generic/iomap.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+	return (unsigned long) address - PAGE_OFFSET;
+}
+
+static inline void*
+phys_to_virt (unsigned long address)
+{
+	return (void *) (address + PAGE_OFFSET);
+}
+
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
+extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
+
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
+#define bus_to_virt	phys_to_virt
+#define virt_to_bus	virt_to_phys
+#define page_to_bus	page_to_phys
+
+# endif /* KERNEL */
+
+/*
+ * Memory fence w/accept.  This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a()	ia64_mfa()
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes.  This will make sure that writes
+ * following the barrier will arrive after all previous writes.  For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ *
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+	ia64_mfa();
+}
+
+static inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+	struct io_space *space;
+	unsigned long offset;
+
+	space = &io_space[IO_SPACE_NR(port)];
+	port = IO_SPACE_PORT(port);
+	if (space->sparse)
+		offset = IO_SPACE_SPARSE_ENCODING(port);
+	else
+		offset = port;
+
+	return (void *) (space->mmio_base | offset);
+}
+
+#define __ia64_inb	___ia64_inb
+#define __ia64_inw	___ia64_inw
+#define __ia64_inl	___ia64_inl
+#define __ia64_outb	___ia64_outb
+#define __ia64_outw	___ia64_outw
+#define __ia64_outl	___ia64_outl
+#define __ia64_readb	___ia64_readb
+#define __ia64_readw	___ia64_readw
+#define __ia64_readl	___ia64_readl
+#define __ia64_readq	___ia64_readq
+#define __ia64_readb_relaxed	___ia64_readb
+#define __ia64_readw_relaxed	___ia64_readw
+#define __ia64_readl_relaxed	___ia64_readl
+#define __ia64_readq_relaxed	___ia64_readq
+#define __ia64_writeb	___ia64_writeb
+#define __ia64_writew	___ia64_writew
+#define __ia64_writel	___ia64_writel
+#define __ia64_writeq	___ia64_writeq
+#define __ia64_mmiowb	___ia64_mmiowb
+
+/*
+ * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
+ * that the access has completed before executing other I/O accesses.  Since we're doing
+ * the accesses through an uncacheable (UC) translation, the CPU will execute them in
+ * program order.  However, we still need to tell the compiler not to shuffle them around
+ * during optimization, which is why we use "volatile" pointers.
+ */
+
+static inline unsigned int
+___ia64_inb (unsigned long port)
+{
+	volatile unsigned char *addr = __ia64_mk_io_addr(port);
+	unsigned char ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+static inline unsigned int
+___ia64_inw (unsigned long port)
+{
+	volatile unsigned short *addr = __ia64_mk_io_addr(port);
+	unsigned short ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+static inline unsigned int
+___ia64_inl (unsigned long port)
+{
+	volatile unsigned int *addr = __ia64_mk_io_addr(port);
+	unsigned int ret;
+
+	ret = *addr;
+	__ia64_mf_a();
+	return ret;
+}
+
+static inline void
+___ia64_outb (unsigned char val, unsigned long port)
+{
+	volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
+
+static inline void
+___ia64_outw (unsigned short val, unsigned long port)
+{
+	volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
+
+static inline void
+___ia64_outl (unsigned int val, unsigned long port)
+{
+	volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+	*addr = val;
+	__ia64_mf_a();
+}
+
+static inline void
+__insb (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned char *dp = dst;
+
+	while (count--)
+		*dp++ = platform_inb(port);
+}
+
+static inline void
+__insw (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned short *dp = dst;
+
+	while (count--)
+		put_unaligned(platform_inw(port), dp++);
+}
+
+static inline void
+__insl (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned int *dp = dst;
+
+	while (count--)
+		put_unaligned(platform_inl(port), dp++);
+}
+
+static inline void
+__outsb (unsigned long port, const void *src, unsigned long count)
+{
+	const unsigned char *sp = src;
+
+	while (count--)
+		platform_outb(*sp++, port);
+}
+
+static inline void
+__outsw (unsigned long port, const void *src, unsigned long count)
+{
+	const unsigned short *sp = src;
+
+	while (count--)
+		platform_outw(get_unaligned(sp++), port);
+}
+
+static inline void
+__outsl (unsigned long port, const void *src, unsigned long count)
+{
+	const unsigned int *sp = src;
+
+	while (count--)
+		platform_outl(get_unaligned(sp++), port);
+}
+
+/*
+ * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
+ * specification regarding legacy I/O support.  Thus, we have to make these operations
+ * platform dependent...
+ */
+#define __inb		platform_inb
+#define __inw		platform_inw
+#define __inl		platform_inl
+#define __outb		platform_outb
+#define __outw		platform_outw
+#define __outl		platform_outl
+#define __mmiowb	platform_mmiowb
+
+#define inb(p)		__inb(p)
+#define inw(p)		__inw(p)
+#define inl(p)		__inl(p)
+#define insb(p,d,c)	__insb(p,d,c)
+#define insw(p,d,c)	__insw(p,d,c)
+#define insl(p,d,c)	__insl(p,d,c)
+#define outb(v,p)	__outb(v,p)
+#define outw(v,p)	__outw(v,p)
+#define outl(v,p)	__outl(v,p)
+#define outsb(p,s,c)	__outsb(p,s,c)
+#define outsw(p,s,c)	__outsw(p,s,c)
+#define outsl(p,s,c)	__outsl(p,s,c)
+#define mmiowb()	__mmiowb()
+
+/*
+ * The addresses passed to these functions are already ioremap()ped.
+ *
+ * We need these to be machine vectors since some platforms don't provide
+ * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
+ * a good idea).  Writes are ok though for all existing ia64 platforms (and
+ * hopefully it'll stay that way).
+ */
+static inline unsigned char
+___ia64_readb (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___ia64_readw (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___ia64_readl (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___ia64_readq (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long __force *) addr;
+}
+
+static inline void
+__writeb (unsigned char val, volatile void __iomem *addr)
+{
+	*(volatile unsigned char __force *) addr = val;
+}
+
+static inline void
+__writew (unsigned short val, volatile void __iomem *addr)
+{
+	*(volatile unsigned short __force *) addr = val;
+}
+
+static inline void
+__writel (unsigned int val, volatile void __iomem *addr)
+{
+	*(volatile unsigned int __force *) addr = val;
+}
+
+static inline void
+__writeq (unsigned long val, volatile void __iomem *addr)
+{
+	*(volatile unsigned long __force *) addr = val;
+}
+
+#define __readb		platform_readb
+#define __readw		platform_readw
+#define __readl		platform_readl
+#define __readq		platform_readq
+#define __readb_relaxed	platform_readb_relaxed
+#define __readw_relaxed	platform_readw_relaxed
+#define __readl_relaxed	platform_readl_relaxed
+#define __readq_relaxed	platform_readq_relaxed
+
+#define readb(a)	__readb((a))
+#define readw(a)	__readw((a))
+#define readl(a)	__readl((a))
+#define readq(a)	__readq((a))
+#define readb_relaxed(a)	__readb_relaxed((a))
+#define readw_relaxed(a)	__readw_relaxed((a))
+#define readl_relaxed(a)	__readl_relaxed((a))
+#define readq_relaxed(a)	__readq_relaxed((a))
+#define __raw_readb	readb
+#define __raw_readw	readw
+#define __raw_readl	readl
+#define __raw_readq	readq
+#define __raw_readb_relaxed	readb_relaxed
+#define __raw_readw_relaxed	readw_relaxed
+#define __raw_readl_relaxed	readl_relaxed
+#define __raw_readq_relaxed	readq_relaxed
+#define writeb(v,a)	__writeb((v), (a))
+#define writew(v,a)	__writew((v), (a))
+#define writel(v,a)	__writel((v), (a))
+#define writeq(v,a)	__writeq((v), (a))
+#define writeb_relaxed(v,a)	__writeb((v), (a))
+#define writew_relaxed(v,a)	__writew((v), (a))
+#define writel_relaxed(v,a)	__writel((v), (a))
+#define writeq_relaxed(v,a)	__writeq((v), (a))
+#define __raw_writeb	writeb
+#define __raw_writew	writew
+#define __raw_writel	writel
+#define __raw_writeq	writeq
+
+#ifndef inb_p
+# define inb_p		inb
+#endif
+#ifndef inw_p
+# define inw_p		inw
+#endif
+#ifndef inl_p
+# define inl_p		inl
+#endif
+
+#ifndef outb_p
+# define outb_p		outb
+#endif
+#ifndef outw_p
+# define outw_p		outw
+#endif
+#ifndef outl_p
+# define outl_p		outl
+#endif
+
+# ifdef __KERNEL__
+
+extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+#define early_memremap(phys_addr, size)        early_ioremap(phys_addr, size)
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+#define early_memunmap(addr, size)             early_iounmap(addr, size)
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+	return ioremap(phys_addr, size);
+}
+#define ioremap_cache ioremap_cache
+#define ioremap_uc ioremap_nocache
+
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
+extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
+extern void memset_io(volatile void __iomem *s, int c, long n);
+
+# endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_IO_H */
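The sparse port encoding can be exercised in isolation; a self-contained
illustration (the port value is hypothetical):

	#include <stdio.h>

	#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))

	int main(void)
	{
		unsigned long port = 0x3f8;	/* classic COM1 base */

		/* 0x3f8 -> 0xfe3f8: the upper port bits move up to bit 12. */
		printf("port 0x%lx -> sparse offset 0x%lx\n",
		       port, IO_SPACE_SPARSE_ENCODING(port));
		return 0;
	}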
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 0000000..105c93b
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_IOMMU_H
+#define _ASM_IA64_IOMMU_H 1
+
+#define cpu_has_x2apic 0
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+
+extern void pci_iommu_shutdown(void);
+extern void no_iommu_init(void);
+#ifdef	CONFIG_INTEL_IOMMU
+extern int force_iommu, no_iommu;
+extern int iommu_pass_through;
+extern int iommu_detected;
+#else
+#define iommu_pass_through	(0)
+#define no_iommu		(1)
+#define iommu_detected		(0)
+#endif
+extern void iommu_dma_init(void);
+extern void machvec_init(const char *name);
+
+#endif
diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h
new file mode 100644
index 0000000..92c8d36
--- /dev/null
+++ b/arch/ia64/include/asm/iommu_table.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_IOMMU_TABLE_H
+#define _ASM_IA64_IOMMU_TABLE_H
+
+#define IOMMU_INIT_POST(_detect)
+
+#endif /* _ASM_IA64_IOMMU_TABLE_H */
diff --git a/arch/ia64/include/asm/iosapic.h b/arch/ia64/include/asm/iosapic.h
new file mode 100644
index 0000000..4ae1fbd
--- /dev/null
+++ b/arch/ia64/include/asm/iosapic.h
@@ -0,0 +1,117 @@
+#ifndef __ASM_IA64_IOSAPIC_H
+#define __ASM_IA64_IOSAPIC_H
+
+#define	IOSAPIC_REG_SELECT	0x0
+#define	IOSAPIC_WINDOW		0x10
+#define	IOSAPIC_EOI		0x40
+
+#define	IOSAPIC_VERSION		0x1
+
+/*
+ * Redirection table entry
+ */
+#define	IOSAPIC_RTE_LOW(i)	(0x10 + (i)*2)
+#define	IOSAPIC_RTE_HIGH(i)	(0x11 + (i)*2)
+
+#define	IOSAPIC_DEST_SHIFT		16
+
+/*
+ * Delivery mode
+ */
+#define	IOSAPIC_DELIVERY_SHIFT		8
+#define	IOSAPIC_FIXED			0x0
+#define	IOSAPIC_LOWEST_PRIORITY	0x1
+#define	IOSAPIC_PMI			0x2
+#define	IOSAPIC_NMI			0x4
+#define	IOSAPIC_INIT			0x5
+#define	IOSAPIC_EXTINT			0x7
+
+/*
+ * Interrupt polarity
+ */
+#define	IOSAPIC_POLARITY_SHIFT		13
+#define	IOSAPIC_POL_HIGH		0
+#define	IOSAPIC_POL_LOW		1
+
+/*
+ * Trigger mode
+ */
+#define	IOSAPIC_TRIGGER_SHIFT		15
+#define	IOSAPIC_EDGE			0
+#define	IOSAPIC_LEVEL			1
+
+/*
+ * Mask bit
+ */
+
+#define	IOSAPIC_MASK_SHIFT		16
+#define	IOSAPIC_MASK			(1<<IOSAPIC_MASK_SHIFT)
+
+#define IOSAPIC_VECTOR_MASK		0xffffff00
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_IOSAPIC
+
+#define NR_IOSAPICS			256
+
+#define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
+#define __iosapic_read			__ia64_native_iosapic_read
+#define __iosapic_write			__ia64_native_iosapic_write
+#define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
+
+extern void __init ia64_native_iosapic_pcat_compat_init(void);
+extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
+
+static inline unsigned int
+__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+	writel(reg, iosapic + IOSAPIC_REG_SELECT);
+	return readl(iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void
+__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+	writel(reg, iosapic + IOSAPIC_REG_SELECT);
+	writel(val, iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
+{
+	writel(vector, iosapic + IOSAPIC_EOI);
+}
+
+extern void __init iosapic_system_init (int pcat_compat);
+extern int iosapic_init (unsigned long address, unsigned int gsi_base);
+extern int iosapic_remove (unsigned int gsi_base);
+extern int gsi_to_irq (unsigned int gsi);
+extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
+				  unsigned long trigger);
+extern void iosapic_unregister_intr (unsigned int irq);
+extern void iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
+				      unsigned long polarity,
+				      unsigned long trigger);
+extern int __init iosapic_register_platform_intr (u32 int_type,
+					   unsigned int gsi,
+					   int pmi_vector,
+					   u16 eid, u16 id,
+					   unsigned long polarity,
+					   unsigned long trigger);
+
+#ifdef CONFIG_NUMA
+extern void map_iosapic_to_node (unsigned int, int);
+#endif
+#else
+#define iosapic_system_init(pcat_compat)			do { } while (0)
+#define iosapic_init(address,gsi_base)				(-EINVAL)
+#define iosapic_remove(gsi_base)				(-ENODEV)
+#define iosapic_register_intr(gsi,polarity,trigger)		(gsi)
+#define iosapic_unregister_intr(irq)				do { } while (0)
+#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger)	do { } while (0)
+#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
+	polarity,trigger)					(gsi)
+#endif
+
+# endif /* !__ASSEMBLY__ */
+#endif /* __ASM_IA64_IOSAPIC_H */
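A sketch of the select/window access pattern (kernel context with
CONFIG_IOSAPIC assumed; the base pointer and pin are hypothetical):

	/* Read the low word of redirection-table entry "pin". */
	static u32 read_rte_low(char __iomem *iosapic_base, int pin)
	{
		return __iosapic_read(iosapic_base, IOSAPIC_RTE_LOW(pin));
	}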
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
new file mode 100644
index 0000000..820667c
--- /dev/null
+++ b/arch/ia64/include/asm/irq.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_IRQ_H
+#define _ASM_IA64_IRQ_H
+
+/*
+ * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 11/24/98	S.Eranian 	updated TIMER_IRQ and irq_canonicalize
+ * 01/20/99	S.Eranian	added keyboard interrupt
+ * 02/29/00     D.Mosberger	moved most things into hw_irq.h
+ */
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <generated/nr-irqs.h>
+
+static __inline__ int
+irq_canonicalize (int irq)
+{
+	/*
+	 * We do the legacy thing here of pretending that irqs < 16
+	 * are 8259 irqs.  This really shouldn't be necessary at all,
+	 * but we keep it here as serial.c still uses it...
+	 */
+	return ((irq == 2) ? 9 : irq);
+}
+
+extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+bool is_affinity_mask_valid(const struct cpumask *cpumask);
+
+#define is_affinity_mask_valid is_affinity_mask_valid
+
+int create_irq(void);
+void destroy_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_IRQ_H */
diff --git a/arch/ia64/include/asm/irq_regs.h b/arch/ia64/include/asm/irq_regs.h
new file mode 100644
index 0000000..3dd9c0b
--- /dev/null
+++ b/arch/ia64/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/arch/ia64/include/asm/irq_remapping.h b/arch/ia64/include/asm/irq_remapping.h
new file mode 100644
index 0000000..a8687b1
--- /dev/null
+++ b/arch/ia64/include/asm/irq_remapping.h
@@ -0,0 +1,4 @@
+#ifndef __IA64_INTR_REMAPPING_H
+#define __IA64_INTR_REMAPPING_H
+#define irq_remapping_enabled 0
+#endif
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
new file mode 100644
index 0000000..cec6c06
--- /dev/null
+++ b/arch/ia64/include/asm/irqflags.h
@@ -0,0 +1,98 @@
+/*
+ * IRQ flags defines.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef _ASM_IA64_IRQFLAGS_H
+#define _ASM_IA64_IRQFLAGS_H
+
+#include <asm/pal.h>
+#include <asm/kregs.h>
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+extern unsigned long last_cli_ip;
+static inline void arch_maybe_save_ip(unsigned long flags)
+{
+	if (flags & IA64_PSR_I)
+		last_cli_ip = ia64_getreg(_IA64_REG_IP);
+}
+#else
+#define arch_maybe_save_ip(flags) do {} while (0)
+#endif
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ *   write a floating-point register right before reading the PSR
+ *   and that writes to PSR.mfl
+ */
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	ia64_stop();
+#ifdef CONFIG_PARAVIRT
+	return ia64_get_psr_i();
+#else
+	return ia64_getreg(_IA64_REG_PSR);
+#endif
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I);
+	arch_maybe_save_ip(flags);
+	return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+	arch_local_irq_save();
+#else
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I);
+#endif
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	ia64_stop();
+	ia64_ssm(IA64_PSR_I);
+	ia64_srlz_d();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+	unsigned long old_psr = arch_local_save_flags();
+#endif
+	ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
+	arch_maybe_save_ip(old_psr & ~flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags & IA64_PSR_I) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_safe_halt(void)
+{
+	arch_local_irq_enable();
+	ia64_pal_halt_light();	/* PAL_HALT_LIGHT */
+}
+
+
+#endif /* _ASM_IA64_IRQFLAGS_H */
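The canonical pairing of these primitives, normally reached through
local_irq_save()/local_irq_restore() (sketch; the counter is hypothetical):

	static unsigned long event_count;

	static void bump_event_count(void)
	{
		unsigned long flags = arch_local_irq_save();

		event_count++;		/* psr.i is clear here */
		arch_local_irq_restore(flags);
	}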
diff --git a/arch/ia64/include/asm/kdebug.h b/arch/ia64/include/asm/kdebug.h
new file mode 100644
index 0000000..d11a698
--- /dev/null
+++ b/arch/ia64/include/asm/kdebug.h
@@ -0,0 +1,57 @@
+#ifndef _IA64_KDEBUG_H
+#define _IA64_KDEBUG_H 1
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ *              <anil.s.keshavamurthy@intel.com> adopted from
+ *              include/asm-x86_64/kdebug.h
+ *
+ * 2005-Oct	Keith Owens <kaos@sgi.com>.  Expand notify_die to cover more
+ *		events.
+ */
+
+enum die_val {
+	DIE_BREAK = 1,
+	DIE_FAULT,
+	DIE_OOPS,
+	DIE_MACHINE_HALT,
+	DIE_MACHINE_RESTART,
+	DIE_MCA_MONARCH_ENTER,
+	DIE_MCA_MONARCH_PROCESS,
+	DIE_MCA_MONARCH_LEAVE,
+	DIE_MCA_SLAVE_ENTER,
+	DIE_MCA_SLAVE_PROCESS,
+	DIE_MCA_SLAVE_LEAVE,
+	DIE_MCA_RENDZVOUS_ENTER,
+	DIE_MCA_RENDZVOUS_PROCESS,
+	DIE_MCA_RENDZVOUS_LEAVE,
+	DIE_MCA_NEW_TIMEOUT,
+	DIE_INIT_ENTER,
+	DIE_INIT_MONARCH_ENTER,
+	DIE_INIT_MONARCH_PROCESS,
+	DIE_INIT_MONARCH_LEAVE,
+	DIE_INIT_SLAVE_ENTER,
+	DIE_INIT_SLAVE_PROCESS,
+	DIE_INIT_SLAVE_LEAVE,
+	DIE_KDEBUG_ENTER,
+	DIE_KDEBUG_LEAVE,
+	DIE_KDUMP_ENTER,
+	DIE_KDUMP_LEAVE,
+};
+
+#endif
diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h
new file mode 100644
index 0000000..aea2b81
--- /dev/null
+++ b/arch/ia64/include/asm/kexec.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_IA64_KEXEC_H
+#define _ASM_IA64_KEXEC_H
+
+#include <asm/setup.h>
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+
+#define KEXEC_CONTROL_PAGE_SIZE (8192 + 8192 + 4096)
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_IA_64
+
+#define kexec_flush_icache_page(page) do { \
+                unsigned long page_addr = (unsigned long)page_address(page); \
+                flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
+        } while(0)
+
+extern struct kimage *ia64_kimage;
+extern const unsigned int relocate_new_kernel_size;
+extern void relocate_new_kernel(unsigned long, unsigned long,
+		struct ia64_boot_param *, unsigned long);
+static inline void
+crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
+{
+}
+extern struct resource efi_memmap_res;
+extern struct resource boot_param_res;
+extern void kdump_smp_send_stop(void);
+extern void kdump_smp_send_init(void);
+extern void kexec_disable_iosapic(void);
+extern void crash_save_this_cpu(void);
+struct rsvd_region;
+extern unsigned long kdump_find_rsvd_region(unsigned long size,
+		struct rsvd_region *rsvd_regions, int n);
+extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
+extern int kdump_status[];
+extern atomic_t kdump_cpu_freezed;
+extern atomic_t kdump_in_progress;
+
+#endif /* _ASM_IA64_KEXEC_H */
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
new file mode 100644
index 0000000..05d5f99
--- /dev/null
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_KMAP_TYPES_H
+#define _ASM_IA64_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define  __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
new file mode 100644
index 0000000..d5505d6
--- /dev/null
+++ b/arch/ia64/include/asm/kprobes.h
@@ -0,0 +1,127 @@
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ *              <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <asm/break.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE   2	/* last half is for kprobe-booster */
+#define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
+				(0x1L << 12) |	/* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
+
+typedef union cmp_inst {
+	struct {
+	unsigned long long qp : 6;
+	unsigned long long p1 : 6;
+	unsigned long long c  : 1;
+	unsigned long long r2 : 7;
+	unsigned long long r3 : 7;
+	unsigned long long p2 : 6;
+	unsigned long long ta : 1;
+	unsigned long long x2 : 2;
+	unsigned long long tb : 1;
+	unsigned long long opcode : 4;
+	unsigned long long reserved : 23;
+	}f;
+	unsigned long long l;
+} cmp_inst_t;
+
+struct kprobe;
+
+typedef struct _bundle {
+	struct {
+		unsigned long long template : 5;
+		unsigned long long slot0 : 41;
+		unsigned long long slot1_p0 : 64-46;
+	} quad0;
+	struct {
+		unsigned long long slot1_p1 : 41 - (64-46);
+		unsigned long long slot2 : 41;
+	} quad1;
+} __attribute__((__aligned__(16)))  bundle_t;
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+#define	MAX_PARAM_RSE_SIZE	(0x60+0x60/0x3f)
+/* per-cpu kprobe control block */
+#define ARCH_PREV_KPROBE_SZ 2
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
+	unsigned long *bsp;
+	unsigned long cfm;
+	atomic_t prev_kprobe_index;
+	struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
+};
+
+#define kretprobe_blacklist_size 0
+
+#define SLOT0_OPCODE_SHIFT	(37)
+#define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
+#define SLOT2_OPCODE_SHIFT 	(37)
+
+#define INDIRECT_CALL_OPCODE		(1)
+#define IP_RELATIVE_CALL_OPCODE		(5)
+#define IP_RELATIVE_BRANCH_OPCODE	(4)
+#define IP_RELATIVE_PREDICT_OPCODE	(7)
+#define LONG_BRANCH_OPCODE		(0xC)
+#define LONG_CALL_OPCODE		(0xD)
+#define flush_insn_slot(p)		do { } while (0)
+
+typedef struct kprobe_opcode {
+	bundle_t bundle;
+} kprobe_opcode_t;
+
+/* Architecture specific copy of original instruction*/
+struct arch_specific_insn {
+	/* copy of the instruction to be emulated */
+	kprobe_opcode_t *insn;
+#define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
+#define INST_FLAG_FIX_BRANCH_REG		2
+#define INST_FLAG_BREAK_INST			4
+#define INST_FLAG_BOOSTABLE			8
+	unsigned long inst_flag;
+	unsigned short target_br_reg;
+	unsigned short slot;
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+#endif				/* _ASM_KPROBES_H */
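A minimal decoding illustration using the bundle layout above (the bundle is
assumed to hold a copied instruction):

	/* Extract the 4-bit major opcode from slot 0 of a bundle. */
	static unsigned int slot0_opcode(const bundle_t *b)
	{
		return (unsigned int) (b->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
	}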
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
new file mode 100644
index 0000000..39e65f6
--- /dev/null
+++ b/arch/ia64/include/asm/kregs.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_IA64_KREGS_H
+#define _ASM_IA64_KREGS_H
+
+/*
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * This file defines the kernel register usage convention used by Linux/ia64.
+ */
+
+/*
+ * Kernel registers:
+ */
+#define IA64_KR_IO_BASE		0	/* ar.k0: legacy I/O base address */
+#define IA64_KR_TSSD		1	/* ar.k1: IVE uses this as the TSSD */
+#define IA64_KR_PER_CPU_DATA	3	/* ar.k3: physical per-CPU base */
+#define IA64_KR_CURRENT_STACK	4	/* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
+#define IA64_KR_FPU_OWNER	5	/* ar.k5: fpu-owner (UP only, at the moment) */
+#define IA64_KR_CURRENT		6	/* ar.k6: "current" task pointer */
+#define IA64_KR_PT_BASE		7	/* ar.k7: page table base address (physical) */
+
+#define _IA64_KR_PASTE(x,y)	x##y
+#define _IA64_KR_PREFIX(n)	_IA64_KR_PASTE(ar.k, n)
+#define IA64_KR(n)		_IA64_KR_PREFIX(IA64_KR_##n)
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
+#define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
+#define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
+
+#define IA64_TR_ALLOC_BASE	2 	/* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX	64 	/* Max number for dynamic use*/
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT		1
+#define IA64_PSR_UP_BIT		2
+#define IA64_PSR_AC_BIT		3
+#define IA64_PSR_MFL_BIT	4
+#define IA64_PSR_MFH_BIT	5
+#define IA64_PSR_IC_BIT		13
+#define IA64_PSR_I_BIT		14
+#define IA64_PSR_PK_BIT		15
+#define IA64_PSR_DT_BIT		17
+#define IA64_PSR_DFL_BIT	18
+#define IA64_PSR_DFH_BIT	19
+#define IA64_PSR_SP_BIT		20
+#define IA64_PSR_PP_BIT		21
+#define IA64_PSR_DI_BIT		22
+#define IA64_PSR_SI_BIT		23
+#define IA64_PSR_DB_BIT		24
+#define IA64_PSR_LP_BIT		25
+#define IA64_PSR_TB_BIT		26
+#define IA64_PSR_RT_BIT		27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL0_BIT	32
+#define IA64_PSR_CPL1_BIT	33
+#define IA64_PSR_IS_BIT		34
+#define IA64_PSR_MC_BIT		35
+#define IA64_PSR_IT_BIT		36
+#define IA64_PSR_ID_BIT		37
+#define IA64_PSR_DA_BIT		38
+#define IA64_PSR_DD_BIT		39
+#define IA64_PSR_SS_BIT		40
+#define IA64_PSR_RI_BIT		41
+#define IA64_PSR_ED_BIT		43
+#define IA64_PSR_BN_BIT		44
+#define IA64_PSR_IA_BIT		45
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
+   execve().  */
+#define IA64_PSR_BITS_TO_CLEAR	(IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+				 IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
+				 IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET	(IA64_PSR_DFH | IA64_PSR_SP)
+
+#define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC	(__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL	(__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH	(__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC	(__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I	(__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK	(__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT	(__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL	(__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH	(__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP	(__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP	(__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI	(__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI	(__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB	(__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP	(__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB	(__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT	(__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL	(__IA64_UL(3) << IA64_PSR_CPL0_BIT)
+#define IA64_PSR_IS	(__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC	(__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT	(__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID	(__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA	(__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD	(__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS	(__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
+
+/* User mask bits: */
+#define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT		 0	/* privileged performance monitor default */
+#define IA64_DCR_BE_BIT		 1	/* big-endian default */
+#define IA64_DCR_LC_BIT		 2	/* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT		 8	/* defer TLB miss faults */
+#define IA64_DCR_DP_BIT		 9	/* defer page-not-present faults */
+#define IA64_DCR_DK_BIT		10	/* defer key miss faults */
+#define IA64_DCR_DX_BIT		11	/* defer key permission faults */
+#define IA64_DCR_DR_BIT		12	/* defer access right faults */
+#define IA64_DCR_DA_BIT		13	/* defer access bit faults */
+#define IA64_DCR_DD_BIT		14	/* defer debug faults */
+
+#define IA64_DCR_PP	(__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE	(__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC	(__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM	(__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP	(__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK	(__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX	(__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR	(__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA	(__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD	(__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT		32	/* execute access */
+#define IA64_ISR_W_BIT		33	/* write access */
+#define IA64_ISR_R_BIT		34	/* read access */
+#define IA64_ISR_NA_BIT		35	/* non-access */
+#define IA64_ISR_SP_BIT		36	/* speculative load exception */
+#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */
+#define IA64_ISR_CODE_MASK	0xf
+
+#define IA64_ISR_X	(__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W	(__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R	(__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA	(__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP	(__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS	(__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR	(__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA	0
+#define IA64_ISR_CODE_FC	1
+#define IA64_ISR_CODE_PROBE	2
+#define IA64_ISR_CODE_TAK	3
+#define IA64_ISR_CODE_LFETCH	4
+#define IA64_ISR_CODE_PROBEF	5
+
+#endif /* _ASM_IA64_KREGS_H */
diff --git a/arch/ia64/include/asm/libata-portmap.h b/arch/ia64/include/asm/libata-portmap.h
new file mode 100644
index 0000000..0e00c9a
--- /dev/null
+++ b/arch/ia64/include/asm/libata-portmap.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_IA64_LIBATA_PORTMAP_H
+#define __ASM_IA64_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_CMD		0x1F0
+#define ATA_PRIMARY_CTL		0x3F6
+#define ATA_PRIMARY_IRQ(dev)	isa_irq_to_vector(14)
+
+#define ATA_SECONDARY_CMD	0x170
+#define ATA_SECONDARY_CTL	0x376
+#define ATA_SECONDARY_IRQ(dev)	isa_irq_to_vector(15)
+
+#endif
diff --git a/arch/ia64/include/asm/linkage.h b/arch/ia64/include/asm/linkage.h
new file mode 100644
index 0000000..7875757
--- /dev/null
+++ b/arch/ia64/include/asm/linkage.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifndef __ASSEMBLY__
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
+
+#else
+
+#include <asm/asmmacro.h>
+
+#endif
+
+#define cond_syscall(x) asm(".weak\t" #x "#\n" #x "#\t=\tsys_ni_syscall#")
+#define SYSCALL_ALIAS(alias, name)					\
+	asm ( #alias "# = " #name "#\n\t.globl " #alias "#")
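+
+/*
+ * The trailing '#' is ia64 assembler syntax marking the end of a symbol
+ * name.  For a hypothetical sys_foo, cond_syscall(sys_foo) emits:
+ *
+ *	.weak	sys_foo#
+ *	sys_foo#	=	sys_ni_syscall#
+ *
+ * so unimplemented syscalls weakly alias sys_ni_syscall.
+ */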
+
+#endif
diff --git a/arch/ia64/include/asm/local.h b/arch/ia64/include/asm/local.h
new file mode 100644
index 0000000..c11c530
--- /dev/null
+++ b/arch/ia64/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/arch/ia64/include/asm/local64.h b/arch/ia64/include/asm/local64.h
new file mode 100644
index 0000000..36c93b5
--- /dev/null
+++ b/arch/ia64/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
new file mode 100644
index 0000000..9c39bdf
--- /dev/null
+++ b/arch/ia64/include/asm/machvec.h
@@ -0,0 +1,367 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/types.h>
+
+/* forward declarations: */
+struct device;
+struct pt_regs;
+struct scatterlist;
+struct page;
+struct mm_struct;
+struct pci_bus;
+struct task_struct;
+struct pci_dev;
+struct msi_desc;
+struct dma_attrs;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_cpu_init_t (void);
+typedef void ia64_mv_irq_init_t (void);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+typedef void ia64_mv_timer_interrupt_t (int, void *);
+typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef u8 ia64_mv_irq_to_vector (int);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
+typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
+				       u8 size);
+typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+					u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
+typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
+typedef void ia64_mv_kernel_launch_event_t(void);
+
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+
+/*
+ * WARNING: The legacy I/O space is _architected_.  Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual).  Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
+typedef void ia64_mv_mmiowb_t (void);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+
+typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
+typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
+
+static inline void
+machvec_noop (void)
+{
+}
+
+static inline void
+machvec_noop_mm (struct mm_struct *mm)
+{
+}
+
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
+static inline void
+machvec_noop_bus (struct pci_bus *bus)
+{
+}
+
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *);
+extern void machvec_tlb_migrate_finish (struct mm_struct *);
+
+# if defined (CONFIG_IA64_HP_SIM)
+#  include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+#  include <asm/machvec_dig.h>
+# elif defined(CONFIG_IA64_DIG_VTD)
+#  include <asm/machvec_dig_vtd.h>
+# elif defined (CONFIG_IA64_HP_ZX1)
+#  include <asm/machvec_hpzx1.h>
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+#  include <asm/machvec_hpzx1_swiotlb.h>
+# elif defined (CONFIG_IA64_SGI_SN2)
+#  include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_SGI_UV)
+#  include <asm/machvec_uv.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+# ifdef MACHVEC_PLATFORM_HEADER
+#  include MACHVEC_PLATFORM_HEADER
+# else
+#  define ia64_platform_name	ia64_mv.name
+#  define platform_setup	ia64_mv.setup
+#  define platform_cpu_init	ia64_mv.cpu_init
+#  define platform_irq_init	ia64_mv.irq_init
+#  define platform_send_ipi	ia64_mv.send_ipi
+#  define platform_timer_interrupt	ia64_mv.timer_interrupt
+#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
+#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
+#  define platform_dma_init		ia64_mv.dma_init
+#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+#  define platform_dma_get_ops		ia64_mv.dma_get_ops
+#  define platform_irq_to_vector	ia64_mv.irq_to_vector
+#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
+#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
+#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
+#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
+#  define platform_inb		ia64_mv.inb
+#  define platform_inw		ia64_mv.inw
+#  define platform_inl		ia64_mv.inl
+#  define platform_outb		ia64_mv.outb
+#  define platform_outw		ia64_mv.outw
+#  define platform_outl		ia64_mv.outl
+#  define platform_mmiowb	ia64_mv.mmiowb
+#  define platform_readb	ia64_mv.readb
+#  define platform_readw	ia64_mv.readw
+#  define platform_readl	ia64_mv.readl
+#  define platform_readq	ia64_mv.readq
+#  define platform_readb_relaxed	ia64_mv.readb_relaxed
+#  define platform_readw_relaxed	ia64_mv.readw_relaxed
+#  define platform_readl_relaxed	ia64_mv.readl_relaxed
+#  define platform_readq_relaxed	ia64_mv.readq_relaxed
+#  define platform_migrate		ia64_mv.migrate
+#  define platform_setup_msi_irq	ia64_mv.setup_msi_irq
+#  define platform_teardown_msi_irq	ia64_mv.teardown_msi_irq
+#  define platform_pci_fixup_bus	ia64_mv.pci_fixup_bus
+#  define platform_kernel_launch_event	ia64_mv.kernel_launch_event
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make the size of the
+ * structure a multiple of 16 bytes.
+ * This fills up the holes created because of section 3.3.1 in the
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+	const char *name;
+	ia64_mv_setup_t *setup;
+	ia64_mv_cpu_init_t *cpu_init;
+	ia64_mv_irq_init_t *irq_init;
+	ia64_mv_send_ipi_t *send_ipi;
+	ia64_mv_timer_interrupt_t *timer_interrupt;
+	ia64_mv_global_tlb_purge_t *global_tlb_purge;
+	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+	ia64_mv_dma_init *dma_init;
+	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
+	ia64_mv_irq_to_vector *irq_to_vector;
+	ia64_mv_local_vector_to_irq *local_vector_to_irq;
+	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+	ia64_mv_pci_legacy_read_t *pci_legacy_read;
+	ia64_mv_pci_legacy_write_t *pci_legacy_write;
+	ia64_mv_inb_t *inb;
+	ia64_mv_inw_t *inw;
+	ia64_mv_inl_t *inl;
+	ia64_mv_outb_t *outb;
+	ia64_mv_outw_t *outw;
+	ia64_mv_outl_t *outl;
+	ia64_mv_mmiowb_t *mmiowb;
+	ia64_mv_readb_t *readb;
+	ia64_mv_readw_t *readw;
+	ia64_mv_readl_t *readl;
+	ia64_mv_readq_t *readq;
+	ia64_mv_readb_relaxed_t *readb_relaxed;
+	ia64_mv_readw_relaxed_t *readw_relaxed;
+	ia64_mv_readl_relaxed_t *readl_relaxed;
+	ia64_mv_readq_relaxed_t *readq_relaxed;
+	ia64_mv_migrate_t *migrate;
+	ia64_mv_setup_msi_irq_t *setup_msi_irq;
+	ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
+	ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
+	ia64_mv_kernel_launch_event_t *kernel_launch_event;
+} __attribute__((__aligned__(16))); /* align attribute: see comment above struct */
+
+#define MACHVEC_INIT(name)			\
+{						\
+	#name,					\
+	platform_setup,				\
+	platform_cpu_init,			\
+	platform_irq_init,			\
+	platform_send_ipi,			\
+	platform_timer_interrupt,		\
+	platform_global_tlb_purge,		\
+	platform_tlb_migrate_finish,		\
+	platform_dma_init,			\
+	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
+	platform_irq_to_vector,			\
+	platform_local_vector_to_irq,		\
+	platform_pci_get_legacy_mem,		\
+	platform_pci_legacy_read,		\
+	platform_pci_legacy_write,		\
+	platform_inb,				\
+	platform_inw,				\
+	platform_inl,				\
+	platform_outb,				\
+	platform_outw,				\
+	platform_outl,				\
+	platform_mmiowb,			\
+	platform_readb,				\
+	platform_readw,				\
+	platform_readl,				\
+	platform_readq,				\
+	platform_readb_relaxed,			\
+	platform_readw_relaxed,			\
+	platform_readl_relaxed,			\
+	platform_readq_relaxed,			\
+	platform_migrate,			\
+	platform_setup_msi_irq,			\
+	platform_teardown_msi_irq,		\
+	platform_pci_fixup_bus,			\
+	platform_kernel_launch_event            \
+}
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_init (const char *name);
+extern void machvec_init_from_cmdline(const char *cmdline);
+
+# else
+#  error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
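+
+/*
+ * For example, a hypothetical machvec_foo.h would only override what it
+ * needs and inherit the rest from the defaults below:
+ *
+ *	extern ia64_mv_setup_t foo_setup;
+ *	#define ia64_platform_name	"foo"
+ *	#define platform_setup		foo_setup
+ *
+ * Everything it leaves undefined (platform_cpu_init, platform_inb, ...)
+ * falls back to the machvec_noop/architected versions defined here.
+ */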
+#ifndef platform_setup
+# define platform_setup			machvec_setup
+#endif
+#ifndef platform_cpu_init
+# define platform_cpu_init		machvec_noop
+#endif
+#ifndef platform_irq_init
+# define platform_irq_init		machvec_noop
+#endif
+
+#ifndef platform_send_ipi
+# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
+#endif
+#ifndef platform_timer_interrupt
+# define platform_timer_interrupt 	machvec_timer_interrupt
+#endif
+#ifndef platform_global_tlb_purge
+# define platform_global_tlb_purge	ia64_global_tlb_purge /* default to architected version */
+#endif
+#ifndef platform_tlb_migrate_finish
+# define platform_tlb_migrate_finish	machvec_noop_mm
+#endif
+#ifndef platform_kernel_launch_event
+# define platform_kernel_launch_event	machvec_noop
+#endif
+#ifndef platform_dma_init
+# define platform_dma_init		swiotlb_dma_init
+#endif
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
+#endif
+#ifndef platform_dma_get_required_mask
+# define  platform_dma_get_required_mask	ia64_dma_get_required_mask
+#endif
+#ifndef platform_irq_to_vector
+# define platform_irq_to_vector		__ia64_irq_to_vector
+#endif
+#ifndef platform_local_vector_to_irq
+# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
+#endif
+#ifndef platform_pci_get_legacy_mem
+# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
+#endif
+#ifndef platform_pci_legacy_read
+# define platform_pci_legacy_read	ia64_pci_legacy_read
+extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
+#endif
+#ifndef platform_pci_legacy_write
+# define platform_pci_legacy_write	ia64_pci_legacy_write
+extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
+#endif
+#ifndef platform_inb
+# define platform_inb		__ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw		__ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl		__ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb		__ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw		__ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl		__ia64_outl
+#endif
+#ifndef platform_mmiowb
+# define platform_mmiowb	__ia64_mmiowb
+#endif
+#ifndef platform_readb
+# define platform_readb		__ia64_readb
+#endif
+#ifndef platform_readw
+# define platform_readw		__ia64_readw
+#endif
+#ifndef platform_readl
+# define platform_readl		__ia64_readl
+#endif
+#ifndef platform_readq
+# define platform_readq		__ia64_readq
+#endif
+#ifndef platform_readb_relaxed
+# define platform_readb_relaxed	__ia64_readb_relaxed
+#endif
+#ifndef platform_readw_relaxed
+# define platform_readw_relaxed	__ia64_readw_relaxed
+#endif
+#ifndef platform_readl_relaxed
+# define platform_readl_relaxed	__ia64_readl_relaxed
+#endif
+#ifndef platform_readq_relaxed
+# define platform_readq_relaxed	__ia64_readq_relaxed
+#endif
+#ifndef platform_migrate
+# define platform_migrate machvec_noop_task
+#endif
+#ifndef platform_setup_msi_irq
+# define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
+#endif
+#ifndef platform_teardown_msi_irq
+# define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#ifndef platform_pci_fixup_bus
+# define platform_pci_fixup_bus	machvec_noop_bus
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/arch/ia64/include/asm/machvec_dig.h b/arch/ia64/include/asm/machvec_dig.h
new file mode 100644
index 0000000..1f7403a
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_h
+#define _ASM_IA64_MACHVEC_DIG_h
+
+extern ia64_mv_setup_t dig_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name	"dig"
+#define platform_setup		dig_setup
+
+#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
new file mode 100644
index 0000000..44308b4
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
+#define _ASM_IA64_MACHVEC_DIG_VTD_h
+
+extern ia64_mv_setup_t			dig_setup;
+extern ia64_mv_dma_init			pci_iommu_alloc;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name			"dig_vtd"
+#define platform_setup				dig_setup
+#define platform_dma_init			pci_iommu_alloc
+
+#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpsim.h b/arch/ia64/include/asm/machvec_hpsim.h
new file mode 100644
index 0000000..e757112
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpsim.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_MACHVEC_HPSIM_h
+#define _ASM_IA64_MACHVEC_HPSIM_h
+
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name	"hpsim"
+#define platform_setup		hpsim_setup
+#define platform_irq_init	hpsim_irq_init
+
+#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
new file mode 100644
index 0000000..c74d315
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_h
+#define _ASM_IA64_MACHVEC_HPZX1_h
+
+extern ia64_mv_setup_t			dig_setup;
+extern ia64_mv_dma_init			sba_dma_init;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name			"hpzx1"
+#define platform_setup				dig_setup
+#define platform_dma_init			sba_dma_init
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
new file mode 100644
index 0000000..906ef62
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
+#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
+
+extern ia64_mv_setup_t				dig_setup;
+extern ia64_mv_dma_get_ops			hwsw_dma_get_ops;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name			"hpzx1_swiotlb"
+#define platform_setup				dig_setup
+#define platform_dma_init			machvec_noop
+#define platform_dma_get_ops			hwsw_dma_get_ops
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
new file mode 100644
index 0000000..37a4698
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -0,0 +1,35 @@
+#include <asm/iommu.h>
+#include <asm/machvec.h>
+
+extern ia64_mv_send_ipi_t ia64_send_ipi;
+extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
+extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
+extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
+
+extern ia64_mv_inb_t __ia64_inb;
+extern ia64_mv_inw_t __ia64_inw;
+extern ia64_mv_inl_t __ia64_inl;
+extern ia64_mv_outb_t __ia64_outb;
+extern ia64_mv_outw_t __ia64_outw;
+extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiowb_t __ia64_mmiowb;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;
+extern ia64_mv_readb_t __ia64_readb_relaxed;
+extern ia64_mv_readw_t __ia64_readw_relaxed;
+extern ia64_mv_readl_t __ia64_readl_relaxed;
+extern ia64_mv_readq_t __ia64_readq_relaxed;
+
+#define MACHVEC_HELPER(name)									\
+ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
+	= MACHVEC_INIT(name);
+
+#define MACHVEC_DEFINE(name)	MACHVEC_HELPER(name)
+
+MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
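+
+/*
+ * In a generic kernel, each platform instantiates its vector by defining
+ * the two MACHVEC_PLATFORM_* macros before including this header, along
+ * the lines of (sketch, dig platform assumed):
+ *
+ *	#define MACHVEC_PLATFORM_NAME		dig
+ *	#define MACHVEC_PLATFORM_HEADER		<asm/machvec_dig.h>
+ *	#include <asm/machvec_init.h>
+ *
+ * which places a fully initialized machvec_dig in the .machvec section.
+ */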
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
new file mode 100644
index 0000000..ece9fa8
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it 
+ * under the terms of version 2 of the GNU General Public License 
+ * as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it would be useful, but 
+ * WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * 
+ * Further, this software is distributed without any warranty that it is 
+ * free of the rightful claim of any third person regarding infringement 
+ * or the like.  Any license provided herein, whether implied or 
+ * otherwise, applies only to this software file.  Patent licenses, if 
+ * any, provided herein do not apply to combinations of this program with 
+ * other software, or any other product whatsoever.
+ * 
+ * You should have received a copy of the GNU General Public 
+ * License along with this program; if not, write the Free Software 
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * 
+ * For further information regarding this notice, see: 
+ * 
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#ifndef _ASM_IA64_MACHVEC_SN2_H
+#define _ASM_IA64_MACHVEC_SN2_H
+
+extern ia64_mv_setup_t sn_setup;
+extern ia64_mv_cpu_init_t sn_cpu_init;
+extern ia64_mv_irq_init_t sn_irq_init;
+extern ia64_mv_send_ipi_t sn2_send_IPI;
+extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
+extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
+extern ia64_mv_tlb_migrate_finish_t	sn_tlb_migrate_finish;
+extern ia64_mv_irq_to_vector sn_irq_to_vector;
+extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
+extern ia64_mv_inb_t __sn_inb;
+extern ia64_mv_inw_t __sn_inw;
+extern ia64_mv_inl_t __sn_inl;
+extern ia64_mv_outb_t __sn_outb;
+extern ia64_mv_outw_t __sn_outw;
+extern ia64_mv_outl_t __sn_outl;
+extern ia64_mv_mmiowb_t __sn_mmiowb;
+extern ia64_mv_readb_t __sn_readb;
+extern ia64_mv_readw_t __sn_readw;
+extern ia64_mv_readl_t __sn_readl;
+extern ia64_mv_readq_t __sn_readq;
+extern ia64_mv_readb_t __sn_readb_relaxed;
+extern ia64_mv_readw_t __sn_readw_relaxed;
+extern ia64_mv_readl_t __sn_readl_relaxed;
+extern ia64_mv_readq_t __sn_readq_relaxed;
+extern ia64_mv_dma_get_required_mask	sn_dma_get_required_mask;
+extern ia64_mv_dma_init			sn_dma_init;
+extern ia64_mv_migrate_t		sn_migrate;
+extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
+extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
+extern ia64_mv_teardown_msi_irq_t	sn_teardown_msi_irq;
+extern ia64_mv_pci_fixup_bus_t		sn_pci_fixup_bus;
+
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name		"sn2"
+#define platform_setup			sn_setup
+#define platform_cpu_init		sn_cpu_init
+#define platform_irq_init		sn_irq_init
+#define platform_send_ipi		sn2_send_IPI
+#define platform_timer_interrupt	sn_timer_interrupt
+#define platform_global_tlb_purge	sn2_global_tlb_purge
+#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
+#define platform_pci_fixup		sn_pci_fixup
+#define platform_inb			__sn_inb
+#define platform_inw			__sn_inw
+#define platform_inl			__sn_inl
+#define platform_outb			__sn_outb
+#define platform_outw			__sn_outw
+#define platform_outl			__sn_outl
+#define platform_mmiowb			__sn_mmiowb
+#define platform_readb			__sn_readb
+#define platform_readw			__sn_readw
+#define platform_readl			__sn_readl
+#define platform_readq			__sn_readq
+#define platform_readb_relaxed		__sn_readb_relaxed
+#define platform_readw_relaxed		__sn_readw_relaxed
+#define platform_readl_relaxed		__sn_readl_relaxed
+#define platform_readq_relaxed		__sn_readq_relaxed
+#define platform_irq_to_vector		sn_irq_to_vector
+#define platform_local_vector_to_irq	sn_local_vector_to_irq
+#define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
+#define platform_pci_legacy_read	sn_pci_legacy_read
+#define platform_pci_legacy_write	sn_pci_legacy_write
+#define platform_dma_get_required_mask	sn_dma_get_required_mask
+#define platform_dma_init		sn_dma_init
+#define platform_migrate		sn_migrate
+#define platform_kernel_launch_event	sn_kernel_launch_event
+#ifdef CONFIG_PCI_MSI
+#define platform_setup_msi_irq		sn_setup_msi_irq
+#define platform_teardown_msi_irq	sn_teardown_msi_irq
+#else
+#define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
+#define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#define platform_pci_fixup_bus		sn_pci_fixup_bus
+
+#include <asm/sn/io.h>
+
+#endif /* _ASM_IA64_MACHVEC_SN2_H */
diff --git a/arch/ia64/include/asm/machvec_uv.h b/arch/ia64/include/asm/machvec_uv.h
new file mode 100644
index 0000000..2c50853
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_uv.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV Core Functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_MACHVEC_UV_H
+#define _ASM_IA64_MACHVEC_UV_H
+
+extern ia64_mv_setup_t uv_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define ia64_platform_name		"uv"
+#define platform_setup			uv_setup
+
+#endif /* _ASM_IA64_MACHVEC_UV_H */
diff --git a/arch/ia64/include/asm/mc146818rtc.h b/arch/ia64/include/asm/mc146818rtc.h
new file mode 100644
index 0000000..407787a
--- /dev/null
+++ b/arch/ia64/include/asm/mc146818rtc.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_IA64_MC146818RTC_H
+#define _ASM_IA64_MC146818RTC_H
+
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+
+/* empty include file to satisfy the include in genrtc.c */
+
+#endif /* _ASM_IA64_MC146818RTC_H */
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
new file mode 100644
index 0000000..8c70961
--- /dev/null
+++ b/arch/ia64/include/asm/mca.h
@@ -0,0 +1,187 @@
+/*
+ * File:	mca.h
+ * Purpose:	Machine check handling specific defines
+ *
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
+ */
+
+#ifndef _ASM_IA64_MCA_H
+#define _ASM_IA64_MCA_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include <asm/param.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+#include <asm/mca_asm.h>
+
+#define IA64_MCA_RENDEZ_TIMEOUT		(20 * 1000)	/* value in milliseconds - 20 seconds */
+
+typedef struct ia64_fptr {
+	unsigned long fp;
+	unsigned long gp;
+} ia64_fptr_t;
+
+typedef union cmcv_reg_u {
+	u64	cmcv_regval;
+	struct	{
+		u64	cmcr_vector		: 8;
+		u64	cmcr_reserved1		: 4;
+		u64	cmcr_ignored1		: 1;
+		u64	cmcr_reserved2		: 3;
+		u64	cmcr_mask		: 1;
+		u64	cmcr_ignored2		: 47;
+	} cmcv_reg_s;
+
+} cmcv_reg_t;
+
+#define cmcv_mask		cmcv_reg_s.cmcr_mask
+#define cmcv_vector		cmcv_reg_s.cmcr_vector
+
+enum {
+	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
+	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
+	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
+	IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA	=	0x3,
+};
+
+/* Information maintained by the MC infrastructure */
+typedef struct ia64_mc_info_s {
+	u64		imi_mca_handler;
+	size_t		imi_mca_handler_size;
+	u64		imi_monarch_init_handler;
+	size_t		imi_monarch_init_handler_size;
+	u64		imi_slave_init_handler;
+	size_t		imi_slave_init_handler_size;
+	u8		imi_rendez_checkin[NR_CPUS];
+
+} ia64_mc_info_t;
+
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+
+	/* SAL to OS */
+	unsigned long		os_gp;			/* GP of the os registered with the SAL, physical */
+	unsigned long		pal_proc;		/* PAL_PROC entry point, physical */
+	unsigned long		sal_proc;		/* SAL_PROC entry point, physical */
+	unsigned long		rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
+	unsigned long		proc_state_param;	/* from R18 */
+	unsigned long		monarch;		/* 1 for a monarch event, 0 for a slave */
+
+	/* common */
+	unsigned long		sal_ra;			/* Return address in SAL, physical */
+	unsigned long		sal_gp;			/* GP of the SAL - physical */
+	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	/* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
+	 * Note: if the MCA/INIT recovery code wants to resume to a new context
+	 * then it must change these values to reflect the new kernel stack.
+	 */
+	unsigned long		prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	unsigned long		prev_IA64_KR_CURRENT_STACK;
+	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
+	/* Some interrupt registers are not saved in minstate, pt_regs or
+	 * switch_stack.  Because MCA/INIT can occur when interrupts are
+	 * disabled, we need to save the additional interrupt registers over
+	 * MCA/INIT and resume.
+	 */
+	unsigned long		isr;
+	unsigned long		ifa;
+	unsigned long		itir;
+	unsigned long		iipa;
+	unsigned long		iim;
+	unsigned long		iha;
+
+	/* OS to SAL */
+	unsigned long		os_status;		/* OS status to SAL, enum below */
+	unsigned long		context;		/* 0 if return to same context
+							   1 if return to new context */
+
+	/* I-resources */
+	unsigned long		iip;
+	unsigned long		ipsr;
+	unsigned long		ifs;
+};
+
+enum {
+	IA64_MCA_CORRECTED	=	0x0,	/* Error has been corrected by OS_MCA */
+	IA64_MCA_WARM_BOOT	=	-1,	/* Warm boot of the system needed from SAL */
+	IA64_MCA_COLD_BOOT	=	-2,	/* Cold boot of the system needed from SAL */
+	IA64_MCA_HALT		=	-3	/* System to be halted by SAL */
+};
+
+enum {
+	IA64_INIT_RESUME	=	0x0,	/* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	=	-1,	/* Warm boot of the system needed from SAL */
+};
+
+enum {
+	IA64_MCA_SAME_CONTEXT	=	0x0,	/* SAL to return to same context */
+	IA64_MCA_NEW_CONTEXT	=	-1	/* SAL to return to new context */
+};
+
+/* Per-CPU MCA state that is too big for normal per-CPU variables.  */
+
+struct ia64_mca_cpu {
+	u64 mca_stack[KERNEL_STACK_SIZE/8];
+	u64 init_stack[KERNEL_STACK_SIZE/8];
+};
+
+/* Array of physical addresses of each CPU's MCA area.  */
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
+extern int cpe_vector;
+extern int ia64_cpe_irq;
+extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
+extern void ia64_mca_cpu_init(void *);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+			      struct switch_stack *,
+			      struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_cmc_vector_setup(void);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
+extern void ia64_unreg_MCA_extension(void);
+extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);
+extern void ia64_mca_printk(const char * fmt, ...)
+	 __attribute__ ((format (printf, 1, 2)));
+
+struct ia64_mca_notify_die {
+	struct ia64_sal_os_state *sos;
+	int *monarch_cpu;
+	int *data;
+};
+
+DECLARE_PER_CPU(u64, ia64_mca_pal_base);
+
+#else	/* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system needed from SAL */
+#define IA64_MCA_HALT		-3	/* System to be halted by SAL */
+
+#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+
+#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_MCA_H */
diff --git a/arch/ia64/include/asm/mca_asm.h b/arch/ia64/include/asm/mca_asm.h
new file mode 100644
index 0000000..13c1d49
--- /dev/null
+++ b/arch/ia64/include/asm/mca_asm.h
@@ -0,0 +1,244 @@
+/*
+ * File:	mca_asm.h
+ * Purpose:	Machine check handling specific defines
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#include <asm/percpu.h>
+
+#define PSR_IC		13
+#define PSR_I		14
+#define	PSR_DT		17
+#define PSR_RT		27
+#define PSR_MC		35
+#define PSR_IT		36
+#define PSR_BN		44
+
+/*
+ * This macro converts an instruction virtual address to a physical address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define INST_VA_TO_PA(addr)							\
+	dep	addr	= 0, addr, 61, 3
+/*
+ * This macro converts a data virtual address to a physical address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr)							\
+	tpa	addr	= addr
+/*
+ * This macro converts a data physical address to a virtual address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Put 0x7 in bits 61 thru 63.
+ */
+#define DATA_PA_TO_VA(addr,temp)							\
+	mov	temp	= 0x7	;;							\
+	dep	addr	= temp, addr, 61, 3
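+
+/*
+ * Worked example: DATA_PA_TO_VA maps physical 0x4000 to
+ * 0xe000000000004000, the identity-mapped kernel address in region 7.
+ */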
+
+#define GET_THIS_PADDR(reg, var)		\
+	mov	reg = IA64_KR(PER_CPU_DATA);;	\
+	addl	reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ *	1.	Save the current psr
+ *	2.	Make sure that all the upper 32 bits are off
+ *
+ *	3.	Clear the interrupt enable and interrupt state collection bits
+ *		in the psr before updating the ipsr and iip.
+ *
+ *	4.	Turn off the instruction, data and rse translation bits of the psr
+ *		and store the new value into ipsr
+ *		Also make sure that the interrupts are disabled.
+ *		Ensure that we are in little endian mode.
+ *		[psr.{rt, it, dt, i, be} = 0]
+ *
+ *	5.	Get the physical address corresponding to the virtual address
+ *		of the next instruction bundle and put it in iip.
+ *		(Using magic numbers 24 and 40 in the deposit instruction, since
+ *		 the IA64_SDK code directly maps the lower 24 bits of a virtual
+ *		 address to the physical address).
+ *
+ *	6.	Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)				\
+	mov	old_psr = psr;								\
+	;;										\
+	dep	old_psr = 0, old_psr, 32, 32;						\
+											\
+	mov	ar.rsc = 0 ;								\
+	;;										\
+	srlz.d;										\
+	mov	temp2 = ar.bspstore;							\
+	;;										\
+	DATA_VA_TO_PA(temp2);								\
+	;;										\
+	mov	temp1 = ar.rnat;							\
+	;;										\
+	mov	ar.bspstore = temp2;							\
+	;;										\
+	mov	ar.rnat = temp1;							\
+	mov	temp1 = psr;								\
+	mov	temp2 = psr;								\
+	;;										\
+											\
+	dep	temp2 = 0, temp2, PSR_IC, 2;						\
+	;;										\
+	mov	psr.l = temp2;								\
+	;;										\
+	srlz.d;										\
+	dep	temp1 = 0, temp1, 32, 32;						\
+	;;										\
+	dep	temp1 = 0, temp1, PSR_IT, 1;						\
+	;;										\
+	dep	temp1 = 0, temp1, PSR_DT, 1;						\
+	;;										\
+	dep	temp1 = 0, temp1, PSR_RT, 1;						\
+	;;										\
+	dep	temp1 = 0, temp1, PSR_I, 1;						\
+	;;										\
+	dep	temp1 = 0, temp1, PSR_IC, 1;						\
+	;;										\
+	dep	temp1 = -1, temp1, PSR_MC, 1;						\
+	;;										\
+	mov	cr.ipsr = temp1;							\
+	;;										\
+	LOAD_PHYSICAL(p0, temp2, start_addr);						\
+	;;										\
+	mov	cr.iip = temp2;								\
+	mov	cr.ifs = r0;								\
+	DATA_VA_TO_PA(sp);								\
+	DATA_VA_TO_PA(gp);								\
+	;;										\
+	srlz.i;										\
+	;;										\
+	nop	1;									\
+	nop	2;									\
+	nop	1;									\
+	nop	2;									\
+	rfi;										\
+	;;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ *	1.	Get the old saved psr
+ *
+ *	2.	Clear the interrupt state collection bit in the current psr.
+ *
+ *	3.	Set the instruction translation bit back in the old psr
+ *		Note we have to do this since we are right now saving only the
+ *		lower 32 bits of the old psr.  (Also, the old psr has the data
+ *		and rse translation bits on.)
+ *
+ *	4.	Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ *	5.	Reset the current thread pointer (r13).
+ *
+ *	6.	Set iip to the virtual address of the next instruction bundle.
+ *
+ *	7.	Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
+	mov	temp2 = psr;					\
+	;;							\
+	mov	old_psr = temp2;				\
+	;;							\
+	dep	temp2 = 0, temp2, PSR_IC, 2;			\
+	;;							\
+	mov	psr.l = temp2;					\
+	mov	ar.rsc = 0;					\
+	;;							\
+	srlz.d;							\
+	mov	r13 = ar.k6;					\
+	mov	temp2 = ar.bspstore;				\
+	;;							\
+	DATA_PA_TO_VA(temp2,temp1);				\
+	;;							\
+	mov	temp1 = ar.rnat;				\
+	;;							\
+	mov	ar.bspstore = temp2;				\
+	;;							\
+	mov	ar.rnat = temp1;				\
+	;;							\
+	mov	temp1 = old_psr;				\
+	;;							\
+	mov	temp2 = 1;					\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_IC, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_IT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_DT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_RT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_BN, 1;		\
+	;;							\
+								\
+	mov     cr.ipsr = temp1;				\
+	movl	temp2 = start_addr;				\
+	;;							\
+	mov	cr.iip = temp2;					\
+	movl	gp = __gp					\
+	;;							\
+	DATA_PA_TO_VA(sp, temp1);				\
+	srlz.i;							\
+	;;							\
+	nop	1;						\
+	nop	2;						\
+	nop	1;						\
+	rfi							\
+	;;
+
+/*
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
+ *
+ *      +---------------------------+
+ *      |          pt_regs          |
+ *      +---------------------------+
+ *      |        switch_stack       |
+ *      +---------------------------+
+ *      |        SAL/OS state       |
+ *      +---------------------------+
+ *      |    16 byte scratch area   |
+ *      +---------------------------+ <-------- SP at start of C MCA handler
+ *      |           .....           |
+ *      +---------------------------+
+ *      | RBS for MCA/INIT handler  |
+ *      +---------------------------+
+ *      | struct task for MCA/INIT  |
+ *      +---------------------------+ <-------- Bottom of MCA/INIT stack
+ */
+
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
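+
+/*
+ * Worked example with hypothetical sizes (KERNEL_STACK_SIZE = 0x8000,
+ * IA64_PT_REGS_SIZE = 0x2f0, for illustration only):
+ * MCA_PT_REGS_OFFSET = ALIGN16(0x8000 - 0x2f0) = 0x7d10, and each
+ * subsequent offset nests 16-byte aligned immediately below the
+ * previous area, matching the picture above.
+ */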
+
+#endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
new file mode 100644
index 0000000..092f1c9
--- /dev/null
+++ b/arch/ia64/include/asm/meminit.h
@@ -0,0 +1,74 @@
+#ifndef meminit_h
+#define meminit_h
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+
+/*
+ * Entries defined so far:
+ * 	- boot param structure itself
+ * 	- memory map
+ * 	- initrd (optional)
+ * 	- command line string
+ * 	- kernel code & data
+ * 	- crash dumping code reserved region
+ * 	- Kernel memory map built from EFI memory map
+ * 	- ELF core header
+ *
+ * More could be added if necessary
+ */
+#define IA64_MAX_RSVD_REGIONS 9
+
+struct rsvd_region {
+	u64 start;	/* virtual address of beginning of element */
+	u64 end;	/* virtual address of end of element + 1 */
+};
+
+extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+extern int num_rsvd_regions;
+
+extern void find_memory (void);
+extern void reserve_memory (void);
+extern void find_initrd (void);
+extern int filter_rsvd_memory (u64 start, u64 end, void *arg);
+extern int filter_memory (u64 start, u64 end, void *arg);
+extern unsigned long efi_memmap_init(u64 *s, u64 *e);
+extern int find_max_min_low_pfn (u64, u64, void *);
+
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(u64 *start, u64 *end);
+
+/*
+ * For rounding an address to the next IA64_GRANULE_SIZE or order
+ */
+#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
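+
+/*
+ * Worked example, assuming the common 16MB granule (0x1000000):
+ * GRANULEROUNDDOWN(0x1000001) == 0x1000000,
+ * GRANULEROUNDUP(0x1000001)   == 0x2000000.
+ */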
+
+#ifdef CONFIG_NUMA
+  extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+#else
+# define call_pernode_memory(start, len, func)	(*func)(start, len, 0)
+#endif
+
+#define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+
+extern int register_active_ranges(u64 start, u64 len, int nid);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is larger than this */
+  extern unsigned long VMALLOC_END;
+  extern struct page *vmem_map;
+  extern int find_largest_hole(u64 start, u64 end, void *arg);
+  extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
+  extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	return i + 1;
+}
+#endif
+#endif /* meminit_h */
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
new file mode 100644
index 0000000..fdd5f52
--- /dev/null
+++ b/arch/ia64/include/asm/mman.h
@@ -0,0 +1,17 @@
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_MMAN_H
+#define _ASM_IA64_MMAN_H
+
+#include <uapi/asm/mman.h>
+
+#ifndef __ASSEMBLY__
+#define arch_mmap_check	ia64_mmap_check
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags);
+#endif
+#endif /* _ASM_IA64_MMAN_H */
diff --git a/arch/ia64/include/asm/mmu.h b/arch/ia64/include/asm/mmu.h
new file mode 100644
index 0000000..611432b
--- /dev/null
+++ b/arch/ia64/include/asm/mmu.h
@@ -0,0 +1,13 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+/*
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
+ */
+typedef volatile unsigned long mm_context_t;
+
+typedef unsigned long nv_mm_context_t;
+
+#endif
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
new file mode 100644
index 0000000..7f2a456
--- /dev/null
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -0,0 +1,198 @@
+#ifndef _ASM_IA64_MMU_CONTEXT_H
+#define _ASM_IA64_MMU_CONTEXT_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * Routines to manage the allocation of task context numbers.  Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches.  Context numbers are implemented using IA-64
+ * region ids.  Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process.  We use the least significant three bits of a region
+ * id for this purpose.
+ */
+
+#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
+
+#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
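+
+/*
+ * Worked example: for context 42, an address in region 3 (top three
+ * address bits == 3) yields ia64_rid(42, addr) = (42 << 3) | 3 = 339,
+ * giving each of a process's eight regions a distinct region id.
+ */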
+
+# include <asm/page.h>
+# ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm-generic/mm_hooks.h>
+
+struct ia64_ctx {
+	spinlock_t lock;
+	unsigned int next;	/* next context number to use */
+	unsigned int limit;     /* available free range */
+	unsigned int max_ctx;   /* max. context value supported by all CPUs */
+				/* call wrap_mmu_context when next >= max */
+	unsigned long *bitmap;  /* bitmap size is max_ctx+1 */
+	unsigned long *flushmap;/* pending rid to be flushed */
+};
+
+extern struct ia64_ctx ia64_ctx;
+DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
+
+extern void mmu_context_init (void);
+extern void wrap_mmu_context (struct mm_struct *mm);
+
+static inline void
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void local_flush_tlb_all (void);
+	unsigned long flags;
+
+	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+			local_flush_tlb_all();
+			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	}
+}
+
+static inline nv_mm_context_t
+get_mmu_context (struct mm_struct *mm)
+{
+	unsigned long flags;
+	nv_mm_context_t context = mm->context;
+
+	if (likely(context))
+		goto out;
+
+	spin_lock_irqsave(&ia64_ctx.lock, flags);
+	/* re-check, now that we've got the lock: */
+	context = mm->context;
+	if (context == 0) {
+		cpumask_clear(mm_cpumask(mm));
+		if (ia64_ctx.next >= ia64_ctx.limit) {
+			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			if (ia64_ctx.next >= ia64_ctx.max_ctx)
+				wrap_mmu_context(mm);
+		}
+		mm->context = context = ia64_ctx.next++;
+		__set_bit(context, ia64_ctx.bitmap);
+	}
+	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
+	return context;
+}
+
+/*
+ * Initialize context number to some sane value.  MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
+ */
+static inline int
+init_new_context (struct task_struct *p, struct mm_struct *mm)
+{
+	mm->context = 0;
+	return 0;
+}
+
+static inline void
+destroy_context (struct mm_struct *mm)
+{
+	/* Nothing to do.  */
+}
+
+static inline void
+reload_context (nv_mm_context_t context)
+{
+	unsigned long rid;
+	unsigned long rid_incr = 0;
+	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
+
+	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
+	rid = context << 3;	/* make space for encoding the region number */
+	rid_incr = 1 << 8;
+
+	/* encode the region id, preferred page size, and VHPT enable bit: */
+	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
+	rr1 = rr0 + 1*rid_incr;
+	rr2 = rr0 + 2*rid_incr;
+	rr3 = rr0 + 3*rid_incr;
+	rr4 = rr0 + 4*rid_incr;
+#ifdef  CONFIG_HUGETLB_PAGE
+	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+#  if RGN_HPAGE != 4
+#    error "reload_context assumes RGN_HPAGE is 4"
+#  endif
+#endif
+
+	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
+	ia64_srlz_i();			/* srlz.i implies srlz.d */
+}
+
+/*
+ * Must be called with preemption off
+ */
+static inline void
+activate_context (struct mm_struct *mm)
+{
+	nv_mm_context_t context;
+
+	do {
+		context = get_mmu_context(mm);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		reload_context(context);
+		/*
+		 * in the unlikely event of a TLB-flush by another thread,
+		 * redo the load.
+		 */
+	} while (unlikely(context != mm->context));
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+/*
+ * Switch from address space PREV to address space NEXT.
+ */
+static inline void
+activate_mm (struct mm_struct *prev, struct mm_struct *next)
+{
+	/*
+	 * We may get interrupts here, but that's OK because interrupt
+	 * handlers cannot touch user-space.
+	 */
+	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
+	activate_context(next);
+}
+
+#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
+
+# endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
new file mode 100644
index 0000000..e0de617
--- /dev/null
+++ b/arch/ia64/include/asm/mmzone.h
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000,2003 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_MMZONE_H
+#define _ASM_IA64_MMZONE_H
+
+#include <linux/numa.h>
+#include <asm/page.h>
+#include <asm/meminit.h>
+
+#ifdef CONFIG_NUMA
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+	extern int paddr_to_nid(unsigned long);
+	int nid = paddr_to_nid(pfn << PAGE_SHIFT);
+	if (nid < 0)
+		return 0;
+	else
+		return nid;
+}
+
+#ifdef CONFIG_IA64_DIG /* DIG systems are small */
+# define MAX_PHYSNODE_ID	8
+# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 8)
+#else /* sn2 is the biggest case, so we use that if !DIG */
+# define MAX_PHYSNODE_ID	2048
+# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
+#endif
+
+#else /* CONFIG_NUMA */
+# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_MMZONE_H */
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
new file mode 100644
index 0000000..f31894b
--- /dev/null
+++ b/arch/ia64/include/asm/module.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_IA64_MODULE_H
+#define _ASM_IA64_MODULE_H
+
+#include <asm-generic/module.h>
+
+/*
+ * IA-64-specific support for kernel module loader.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+struct elf64_shdr;			/* forward declaration */
+
+struct mod_arch_specific {
+	struct elf64_shdr *core_plt;	/* core PLT section */
+	struct elf64_shdr *init_plt;	/* init PLT section */
+	struct elf64_shdr *got;		/* global offset table */
+	struct elf64_shdr *opd;		/* official procedure descriptors */
+	struct elf64_shdr *unwind;	/* unwind-table section */
+	unsigned long gp;		/* global-pointer for module */
+
+	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
+	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
+	unsigned int next_got_entry;	/* index of next available got entry */
+};
+
+#define MODULE_PROC_FAMILY	"ia64"
+#define MODULE_ARCH_VERMAGIC	MODULE_PROC_FAMILY \
+	"gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
+
+#define ARCH_SHF_SMALL	SHF_IA_64_SHORT
+
+#endif /* _ASM_IA64_MODULE_H */
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 0000000..592c104
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,42 @@
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define     MSI_DATA_VECTOR_SHIFT	0
+#define	    MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define     MSI_DATA_VECTOR_MASK	0xffffff00
+
+#define     MSI_DATA_DELIVERY_MODE_SHIFT	8
+#define     MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define     MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define     MSI_DATA_LEVEL_SHIFT	14
+#define     MSI_DATA_LEVEL_DEASSERT	(0 << MSI_DATA_LEVEL_SHIFT)
+#define     MSI_DATA_LEVEL_ASSERT	(1 << MSI_DATA_LEVEL_SHIFT)
+
+#define     MSI_DATA_TRIGGER_SHIFT	15
+#define     MSI_DATA_TRIGGER_EDGE	(0 << MSI_DATA_TRIGGER_SHIFT)
+#define     MSI_DATA_TRIGGER_LEVEL	(1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define     MSI_ADDR_DEST_ID_SHIFT	4
+#define     MSI_ADDR_HEADER		0xfee00000
+
+#define     MSI_ADDR_DEST_ID_MASK	0xfff0000f
+#define     MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define     MSI_ADDR_DEST_MODE_SHIFT	2
+#define     MSI_ADDR_DEST_MODE_PHYS	(0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define	    MSI_ADDR_DEST_MODE_LOGIC	(1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define     MSI_ADDR_REDIRECTION_SHIFT	3
+#define     MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define     MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
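+
+/*
+ * Illustrative sketch of how an MSI message could be composed from the
+ * fields above; "dest" and "vector" stand in for real values and are
+ * purely hypothetical:
+ *
+ *	u32 addr = MSI_ADDR_HEADER | MSI_ADDR_DEST_MODE_PHYS |
+ *		   MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID_CPU(dest);
+ *	u32 data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
+ *		   MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector);
+ */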
+
+#endif /* _IA64_MSI_DEF_H */
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
new file mode 100644
index 0000000..f41e66d
--- /dev/null
+++ b/arch/ia64/include/asm/mutex.h
@@ -0,0 +1,90 @@
+/*
+ * ia64 implementation of the mutex fastpath.
+ *
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ *  __mutex_fastpath_lock - try to take the lock by moving the count
+ *                          from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		fail_fn(count);
+}
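+
+/*
+ * Sketch of the count state machine assumed above (informational only):
+ *
+ *	 1  -> unlocked
+ *	 0  -> locked, no waiters
+ *	<0  -> locked, potential waiters
+ *
+ * The acquiring fetchadd takes the count from 1 to 0 on the uncontended
+ * path; any other starting value leaves it below 0 and falls back to
+ * <fail_fn>, the generic slowpath.
+ */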
+
+/**
+ *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                 from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count)
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		return -1;
+	return 0;
+}
+
+/**
+ *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int ret = ia64_fetchadd4_rel(count, 1);
+	if (unlikely(ret < 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()		1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not
+ * set it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (cmpxchg_acq(count, 1, 0) == 1)
+		return 1;
+	return 0;
+}
+
+#endif
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
new file mode 100644
index 0000000..7e08f17
--- /dev/null
+++ b/arch/ia64/include/asm/native/inst.h
@@ -0,0 +1,133 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
+
+#define MOV_FROM_IFA(reg)	\
+	mov reg = cr.ifa
+
+#define MOV_FROM_ITIR(reg)	\
+	mov reg = cr.itir
+
+#define MOV_FROM_ISR(reg)	\
+	mov reg = cr.isr
+
+#define MOV_FROM_IHA(reg)	\
+	mov reg = cr.iha
+
+#define MOV_FROM_IPSR(pred, reg)	\
+(pred)	mov reg = cr.ipsr
+
+#define MOV_FROM_IIM(reg)	\
+	mov reg = cr.iim
+
+#define MOV_FROM_IIP(reg)	\
+	mov reg = cr.iip
+
+#define MOV_FROM_IVR(reg, clob)	\
+	mov reg = cr.ivr
+
+#define MOV_FROM_PSR(pred, reg, clob)	\
+(pred)	mov reg = psr
+
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+(pred)	mov reg = ar.itc
+
+#define MOV_TO_IFA(reg, clob)	\
+	mov cr.ifa = reg
+
+#define MOV_TO_ITIR(pred, reg, clob)	\
+(pred)	mov cr.itir = reg
+
+#define MOV_TO_IHA(pred, reg, clob)	\
+(pred)	mov cr.iha = reg
+
+#define MOV_TO_IPSR(pred, reg, clob)		\
+(pred)	mov cr.ipsr = reg
+
+#define MOV_TO_IFS(pred, reg, clob)	\
+(pred)	mov cr.ifs = reg
+
+#define MOV_TO_IIP(reg, clob)	\
+	mov cr.iip = reg
+
+#define MOV_TO_KR(kr, reg, clob0, clob1)	\
+	mov IA64_KR(kr) = reg
+
+#define ITC_I(pred, reg, clob)	\
+(pred)	itc.i reg
+
+#define ITC_D(pred, reg, clob)	\
+(pred)	itc.d reg
+
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
+(pred_i) itc.i reg;				\
+(pred_d) itc.d reg
+
+#define THASH(pred, reg0, reg1, clob)		\
+(pred)	thash reg0 = reg1
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)		\
+	ssm psr.ic | PSR_DEFAULT_BITS					\
+	;;								\
+	srlz.i /* guarantee that interruption collection is on */	\
+	;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
+	ssm psr.ic				\
+	;;					\
+	srlz.d
+
+#define RSM_PSR_IC(clob)	\
+	rsm psr.ic
+
+#define SSM_PSR_I(pred, pred_clob, clob)	\
+(pred)	ssm psr.i
+
+#define RSM_PSR_I(pred, clob0, clob1)	\
+(pred)	rsm psr.i
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
+	rsm psr.i | psr.ic
+
+#define RSM_PSR_DT		\
+	rsm psr.dt
+
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	rsm psr.be | psr.i
+
+#define SSM_PSR_DT_AND_SRLZ_I	\
+	ssm psr.dt		\
+	;;			\
+	srlz.i
+
+#define BSW_0(clob0, clob1, clob2)	\
+	bsw.0
+
+#define BSW_1(clob0, clob1)	\
+	bsw.1
+
+#define COVER	\
+	cover
+
+#define RFI	\
+	rfi
diff --git a/arch/ia64/include/asm/native/irq.h b/arch/ia64/include/asm/native/irq.h
new file mode 100644
index 0000000..887a228
--- /dev/null
+++ b/arch/ia64/include/asm/native/irq.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _ASM_IA64_NATIVE_IRQ_H
+#define _ASM_IA64_NATIVE_IRQ_H
+
+#define NR_VECTORS	256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
+#else
+#define IA64_NATIVE_NR_IRQS 1024
+#endif
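+
+/*
+ * Worked example: with NR_CPUS = 16 the sum is 256 + 32 * 16 = 768,
+ * below the cap, so IA64_NATIVE_NR_IRQS is 768; with NR_CPUS = 64 the
+ * sum is 2304 and the value is clamped to 1024.
+ */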
+
+#endif /* _ASM_IA64_NATIVE_IRQ_H */
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 0000000..be16ca9
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist		\
+	__ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist			\
+	__ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist			\
+	__ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist			\
+	__ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist		\
+	__ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist		\
+	__ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/nodedata.h b/arch/ia64/include/asm/nodedata.h
new file mode 100644
index 0000000..2fb337b
--- /dev/null
+++ b/arch/ia64/include/asm/nodedata.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_NODEDATA_H
+#define _ASM_IA64_NODEDATA_H
+
+#include <linux/numa.h>
+
+#include <asm/percpu.h>
+#include <asm/mmzone.h>
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Node Data. One of these structures is located on each node of a NUMA system.
+ */
+
+struct pglist_data;
+struct ia64_node_data {
+	short			active_cpu_count;
+	short			node;
+	struct pglist_data	*pg_data_ptrs[MAX_NUMNODES];
+};
+
+
+/*
+ * Return a pointer to the node_data structure for the executing cpu.
+ */
+#define local_node_data		(local_cpu_data->node_data)
+
+/*
+ * Given a node id, return a pointer to the pg_data_t for the node.
+ *
+ * NODE_DATA 	- should be used in all code not related to system
+ *		  initialization. It uses pernode data structures to minimize
+ *		  offnode memory references. However, these structures are not
+ *		  present during boot. This macro can be used once cpu_init
+ *		  completes.
+ */
+#define NODE_DATA(nid)		(local_node_data->pg_data_ptrs[nid])
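+
+/*
+ * Illustrative use (hypothetical caller): the lookup stays within the
+ * executing cpu's pernode array, avoiding an offnode reference in the
+ * common case:
+ *
+ *	pg_data_t *pgdat = NODE_DATA(nid);
+ */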
+
+/*
+ * LOCAL_DATA_ADDR - Calculates the address of another node's
+ *		     "local_node_data" at the hot-plug phase. The
+ *		     local_node_data is reached through the per-cpu page
+ *		     and normally refers only to the executing cpu. When
+ *		     a new node is hot-added, however, the local data of
+ *		     all other nodes must be updated, so this macro is
+ *		     used to obtain their addresses.
+#define LOCAL_DATA_ADDR(pgdat)  			\
+	((struct ia64_node_data *)((u64)(pgdat) + 	\
+				   L1_CACHE_ALIGN(sizeof(struct pglist_data))))
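+
+/*
+ * Note: the layout assumption here is that each node's ia64_node_data
+ * is allocated immediately after its cache-aligned struct pglist_data,
+ * so the address can be derived from the pgdat pointer alone.
+ */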
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
new file mode 100644
index 0000000..2db0a6c
--- /dev/null
+++ b/arch/ia64/include/asm/numa.h
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains NUMA specific prototypes and definitions.
+ *
+ * 2002/08/05 Erich Focht <efocht@ess.nec.de>
+ *
+ */
+#ifndef _ASM_IA64_NUMA_H
+#define _ASM_IA64_NUMA_H
+
+
+#ifdef CONFIG_NUMA
+
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/mmzone.h>
+
+extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+extern pg_data_t *pgdat_list[MAX_NUMNODES];
+
+/* Stuff below this line could be architecture independent */
+
+extern int num_node_memblks;		/* total number of memory chunks */
+
+/*
+ * List of node memory chunks. Filled when parsing the SRAT table to
+ * obtain information about memory nodes.
+ */
+
+struct node_memblk_s {
+	unsigned long start_paddr;
+	unsigned long size;
+	int nid;		/* which logical node contains this chunk? */
+	int bank;		/* which mem bank on this node */
+};
+
+struct node_cpuid_s {
+	u16	phys_id;	/* id << 8 | eid */
+	int	nid;		/* logical node containing this CPU */
+};
+
+extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
+extern struct node_cpuid_s node_cpuid[NR_CPUS];
+
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ *
+ * This is a matrix with "distances" between nodes, they should be
+ * proportional to the memory access latency ratios.
+ */
+
+extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
+#define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
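+
+/*
+ * Worked example: the SLIT is stored row-major, so (taking
+ * MAX_NUMNODES == 2 for simplicity) a hypothetical two-node system with
+ * numa_slit = { 10, 20, 20, 10 } gives node_distance(0, 1) == 20.  ACPI
+ * defines 10 as the local distance, so remote memory here is twice as
+ * far away as local memory.
+ */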
+
+extern int paddr_to_nid(unsigned long paddr);
+
+#define local_nodeid (cpu_to_node_map[smp_processor_id()])
+
+extern void map_cpu_to_node(int cpu, int nid);
+extern void unmap_cpu_from_node(int cpu, int nid);
+extern void numa_clear_node(int cpu);
+
+#else /* !CONFIG_NUMA */
+#define map_cpu_to_node(cpu, nid)	do { } while (0)
+#define unmap_cpu_from_node(cpu, nid)	do { } while (0)
+#define paddr_to_nid(addr)	0
+#define numa_clear_node(cpu)	do { } while (0)
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
new file mode 100644
index 0000000..ec48bb9
--- /dev/null
+++ b/arch/ia64/include/asm/page.h
@@ -0,0 +1,236 @@
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/types.h>
+
+/*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT	(61)
+#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS	(RGN_BASE(-1))
+
+#define RGN_KERNEL	7	/* Identity mapped region */
+#define RGN_UNCACHED    6	/* Identity mapped I/O region */
+#define RGN_GATE	5	/* Gate page, Kernel text, etc */
+#define RGN_HPAGE	4	/* For Huge TLB pages */
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT	13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT	14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE - 1))
+
+#define PERCPU_PAGE_SHIFT	18	/* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
+# define HPAGE_SHIFT		hpage_shift
+# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
+# define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
+# define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef __ASSEMBLY__
+# define __pa(x)		((x) - PAGE_OFFSET)
+# define __va(x)		((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY__ */
+#  define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+/*
+ * clear_user_page() and copy_user_page() can't be inline functions because
+ * flush_dcache_page() can't be defined until later...
+ */
+#define clear_user_page(addr, vaddr, page)	\
+do {						\
+	clear_page(addr);			\
+	flush_dcache_page(page);		\
+} while (0)
+
+#define copy_user_page(to, from, vaddr, page)	\
+do {						\
+	copy_page((to), (from));		\
+	flush_dcache_page(page);		\
+} while (0)
+
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
+({									\
+	struct page *page = alloc_page_vma(				\
+		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
+	if (page)							\
+ 		flush_dcache_page(page);				\
+	page;								\
+})
+
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern int ia64_pfn_valid (unsigned long pfn);
+#else
+# define ia64_pfn_valid(pfn) 1
+#endif
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern struct page *vmem_map;
+#ifdef CONFIG_DISCONTIGMEM
+# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn)	(vmem_map + (pfn))
+#else
+# include <asm-generic/memory_model.h>
+#endif
+#else
+# include <asm-generic/memory_model.h>
+#endif
+
+#ifdef CONFIG_FLATMEM
+# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+#elif defined(CONFIG_DISCONTIGMEM)
+extern unsigned long min_low_pfn;
+extern unsigned long max_low_pfn;
+# define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+#endif
+
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+typedef union ia64_va {
+	struct {
+		unsigned long off : 61;		/* intra-region offset */
+		unsigned long reg :  3;		/* region number */
+	} f;
+	unsigned long l;
+	void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero.  They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
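+
+/*
+ * Worked example: for the identity-mapped kernel address
+ * 0xe000000000012345 (region 7, RGN_KERNEL), __pa() clears the three
+ * region bits and yields the physical address 0x12345; __va() sets them
+ * again.  Each is a single-field update, hence the single "dep"
+ * instruction mentioned above.
+ */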
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
+				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
+# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+extern unsigned int hpage_shift;
+#endif
+
+static __inline__ int
+get_order (unsigned long size)
+{
+	long double d = size - 1;
+	long order;
+
+	order = ia64_getf_exp(d);
+	order = order - PAGE_SHIFT - 0xffff + 1;
+	if (order < 0)
+		order = 0;
+	return order;
+}
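+
+/*
+ * Note: ia64_getf_exp() extracts the 0xffff-biased binary exponent of
+ * (size - 1), so the arithmetic above computes ceil(log2(size)) minus
+ * PAGE_SHIFT, clamped at 0.  For example, with 16KB pages
+ * (PAGE_SHIFT == 14), get_order(16384) == 0 and get_order(65536) == 2.
+ */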
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef STRICT_MM_TYPECHECKS
+  /*
+   * These are used to make use of C type-checking..
+   */
+  typedef struct { unsigned long pte; } pte_t;
+  typedef struct { unsigned long pmd; } pmd_t;
+#if CONFIG_PGTABLE_LEVELS == 4
+  typedef struct { unsigned long pud; } pud_t;
+#endif
+  typedef struct { unsigned long pgd; } pgd_t;
+  typedef struct { unsigned long pgprot; } pgprot_t;
+  typedef struct page *pgtable_t;
+
+# define pte_val(x)	((x).pte)
+# define pmd_val(x)	((x).pmd)
+#if CONFIG_PGTABLE_LEVELS == 4
+# define pud_val(x)	((x).pud)
+#endif
+# define pgd_val(x)	((x).pgd)
+# define pgprot_val(x)	((x).pgprot)
+
+# define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
+# define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+  /*
+   * .. while these make it easier on the compiler
+   */
+# ifndef __ASSEMBLY__
+    typedef unsigned long pte_t;
+    typedef unsigned long pmd_t;
+    typedef unsigned long pgd_t;
+    typedef unsigned long pgprot_t;
+    typedef struct page *pgtable_t;
+# endif
+
+# define pte_val(x)	(x)
+# define pmd_val(x)	(x)
+# define pgd_val(x)	(x)
+# define pgprot_val(x)	(x)
+
+# define __pte(x)	(x)
+# define __pgd(x)	(x)
+# define __pgprot(x)	(x)
+#endif /* !STRICT_MM_TYPECHECKS */
+
+#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)
+
+#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
+					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
+					  ? VM_EXEC : 0))
+
+#define GATE_ADDR		RGN_BASE(RGN_GATE)
+
+/*
+ * The range 0xa000000000000000+2*PERCPU_PAGE_SIZE up to
+ * 0xa000000000000000+3*PERCPU_PAGE_SIZE remains unmapped (guard page).
+ */
+#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
+#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
+#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
+
+#define __HAVE_ARCH_GATE_AREA	1
+
+#endif /* _ASM_IA64_PAGE_H */
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
new file mode 100644
index 0000000..2e69284
--- /dev/null
+++ b/arch/ia64/include/asm/pal.h
@@ -0,0 +1,1825 @@
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
+ *
+ * 99/10/01	davidm	Make sure we pass zero for reserved parameters.
+ * 00/03/07	davidm	Updated pal_cache_flush() to be in sync with PAL v2.6.
+ * 00/03/23     cfleck  Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24     eranian Updated to latest PAL spec, fixed structure bugs, added
+ * 00/05/25	eranian Support for stack calls, and static physical calls
+ * 00/06/18	eranian Support for stacked physical calls
+ * 06/10/26	rja	Support for Intel Itanium Architecture Software Developer's
+ *			Manual Rev 2.2 (Jan 2006)
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH		1	/* flush i/d cache */
+#define PAL_CACHE_INFO		2	/* get detailed i/d cache info */
+#define PAL_CACHE_INIT		3	/* initialize i/d cache */
+#define PAL_CACHE_SUMMARY	4	/* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB		5	/* list supported memory attributes */
+#define PAL_PTCE_INFO		6	/* purge TLB info */
+#define PAL_VM_INFO		7	/* return supported virtual memory features */
+#define PAL_VM_SUMMARY		8	/* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES	9	/* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES	10	/* set processor bus features */
+#define PAL_DEBUG_INFO		11	/* get number of debug registers */
+#define PAL_FIXED_ADDR		12	/* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE		13	/* base frequency of the platform */
+#define PAL_FREQ_RATIOS		14	/* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO	15	/* return performance monitor info */
+#define PAL_PLATFORM_ADDR	16	/* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES	17	/* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES	18	/* enable/disable configurable processor features */
+#define PAL_RSE_INFO		19	/* return rse information */
+#define PAL_VERSION		20	/* return version of PAL code */
+#define PAL_MC_CLEAR_LOG	21	/* clear all processor log info */
+#define PAL_MC_DRAIN		22	/* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED		23	/* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE	24	/* get processor dynamic state */
+#define PAL_MC_ERROR_INFO	25	/* get processor MCA info and static state */
+#define PAL_MC_RESUME		26	/* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM	27	/* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT		28	/* enter the low power HALT state */
+#define PAL_HALT_LIGHT		29	/* enter the low power light halt state*/
+#define PAL_COPY_INFO		30	/* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT	31	/* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT	32	/* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV	33	/* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE	34	/* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST	37	/* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO	38	/* get i/d cache protection info */
+#define PAL_REGISTER_INFO	39	/* return AR and CR register information*/
+#define PAL_SHUTDOWN		40	/* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
+#define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
+#define PAL_GET_HW_POLICY	48	/* Get current hardware resource sharing policy */
+#define PAL_SET_HW_POLICY	49	/* Set current hardware resource sharing policy */
+#define PAL_VP_INFO		50	/* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING	51	/* Hardware tracking status */
+
+#define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
+#define PAL_TEST_PROC		258	/* perform late processor self-test */
+#define PAL_CACHE_READ		259	/* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE		260	/* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ		261	/* read contents of translation register */
+#define PAL_GET_PSTATE		262	/* get the current P-state */
+#define PAL_SET_PSTATE		263	/* set the P-state */
+#define PAL_BRAND_INFO		274	/* Processor branding information */
+
+#define PAL_GET_PSTATE_TYPE_LASTSET	0
+#define PAL_GET_PSTATE_TYPE_AVGANDRESET	1
+#define PAL_GET_PSTATE_TYPE_AVGNORESET	2
+#define PAL_GET_PSTATE_TYPE_INSTANT	3
+
+#define PAL_MC_ERROR_INJECT	276	/* Injects processor error or returns injection capabilities */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64				pal_status_t;
+
+#define PAL_STATUS_SUCCESS		0	/* No error */
+#define PAL_STATUS_UNIMPLEMENTED	(-1)	/* Unimplemented procedure */
+#define PAL_STATUS_EINVAL		(-2)	/* Invalid argument */
+#define PAL_STATUS_ERROR		(-3)	/* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL	(-4)	/* Could not initialize the
+						 * specified level and type of
+						 * cache without side effects
+						 * and "restrict" was 1
+						 */
+#define PAL_STATUS_REQUIRES_MEMORY	(-9)	/* Call requires PAL memory buffer */
+
+/* Processor cache level in the hierarchy */
+typedef u64				pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0		0	/* L0 */
+#define PAL_CACHE_LEVEL_L1		1	/* L1 */
+#define PAL_CACHE_LEVEL_L2		2	/* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64				pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION	1	/* Instruction cache */
+#define PAL_CACHE_TYPE_DATA		2	/* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA	3	/* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_INVALIDATE	1	/* Invalidate clean lines */
+#define PAL_CACHE_FLUSH_CHK_INTRS	2	/* check for interrupts/mc while flushing */
+
+/* Processor cache line size in bytes  */
+typedef int				pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64				pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID	0	/* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED	1	/* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE	2	/* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED	3	/* Modified */
+
+typedef struct pal_freq_ratio {
+	u32 den, num;		/* denominator & numerator */
+} itc_ratio, proc_ratio;
+
+typedef	union  pal_cache_config_info_1_s {
+	struct {
+		u64		u		: 1,	/* 0 Unified cache ? */
+				at		: 2,	/* 2-1 Cache mem attr*/
+				reserved	: 5,	/* 7-3 Reserved */
+				associativity	: 8,	/* 16-8 Associativity*/
+				line_size	: 8,	/* 23-17 Line size */
+				stride		: 8,	/* 31-24 Stride */
+				store_latency	: 8,	/*39-32 Store latency*/
+				load_latency	: 8,	/* 47-40 Load latency*/
+				store_hints	: 8,	/* 55-48 Store hints*/
+				load_hints	: 8;	/* 63-56 Load hints */
+	} pcci1_bits;
+	u64			pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef	union  pal_cache_config_info_2_s {
+	struct {
+		u32		cache_size;		/*cache size in bytes*/
+
+
+		u32		alias_boundary	: 8,	/* 39-32 aliased addr
+							 * separation for max
+							 * performance.
+							 */
+				tag_ls_bit	: 8,	/* 47-40 LSb of addr*/
+				tag_ms_bit	: 8,	/* 55-48 MSb of addr*/
+				reserved	: 8;	/* 63-56 Reserved */
+	} pcci2_bits;
+	u64			pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+	pal_status_t			pcci_status;
+	pal_cache_config_info_1_t	pcci_info_1;
+	pal_cache_config_info_2_t	pcci_info_2;
+	u64				pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hints		pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints		pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency		pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency		pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride		pcci_info_1.pcci1_bits.stride
+#define pcci_line_size		pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc		pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr		pcci_info_1.pcci1_bits.at
+#define pcci_unified		pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb		pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb		pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary	pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size		pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT		0	/* Write through cache */
+#define PAL_CACHE_ATTR_WB		1	/* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB		2	/* Either write thru or write
+						 * back depending on TLB
+						 * memory attributes
+						 */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1		0	/* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1		1	/* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL	3	/* Non-temporal all levels */
+
+/* Processor cache protection  information */
+typedef union pal_cache_protection_element_u {
+	u32			pcpi_data;
+	struct {
+		u32		data_bits	: 8, /* # data bits covered by
+						      * each unit of protection
+						      */
+
+				tagprot_lsb	: 6, /* Least -do- */
+				tagprot_msb	: 6, /* Most Sig. tag address
+						      * bit that this
+						      * protection covers.
+						      */
+				prot_bits	: 6, /* # of protection bits */
+				method		: 4, /* Protection method */
+				t_d		: 2; /* Indicates which part
+						      * of the cache this
+						      * protection encoding
+						      * applies to.
+						      */
+	} pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part	pcp_info.t_d
+#define pcpi_prot_method	pcp_info.method
+#define pcpi_prot_bits		pcp_info.prot_bits
+#define pcpi_tagprot_msb	pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb	pcp_info.tagprot_lsb
+#define pcpi_data_bits		pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA	0	/* Data protection  */
+#define PAL_CACHE_PROT_PART_TAG		1	/* Tag  protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA	2	/* Tag+data protection (tag is
+						 * more significant )
+						 */
+#define PAL_CACHE_PROT_PART_DATA_TAG	3	/* Data+tag protection (data is
+						 * more significant )
+						 */
+#define PAL_CACHE_PROT_PART_MAX		6
+
+
+typedef struct pal_cache_protection_info_s {
+	pal_status_t			pcpi_status;
+	pal_cache_protection_element_t	pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE		0	/* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY	1	/* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY	2	/* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC		3	/* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+	u64			pclid_data;
+	struct {
+		u64		cache_type	: 8,	/* 7-0 cache type */
+				level		: 8,	/* 15-8 level of the
+							 * cache in the
+							 * hierarchy.
+							 */
+				way		: 8,	/* 23-16 way in the set
+							 */
+				part		: 8,	/* 31-24 part of the
+							 * cache
+							 */
+				reserved	: 32;	/* 63-32 is reserved*/
+	} pclid_info_read;
+	struct {
+		u64		cache_type	: 8,	/* 7-0 cache type */
+				level		: 8,	/* 15-8 level of the
+							 * cache in the
+							 * hierarchy.
+							 */
+				way		: 8,	/* 23-16 way in the set
+							 */
+				part		: 8,	/* 31-24 part of the
+							 * cache
+							 */
+				mesi		: 8,	/* 39-32 cache line
+							 * state
+							 */
+				start		: 8,	/* 47-40 lsb of data to
+							 * invert
+							 */
+				length		: 8,	/* 55-48 #bits to
+							 * invert
+							 */
+				trigger		: 8;	/* 63-56 Trigger error
+							 * by doing a load
+							 * after the write
+							 */
+
+	} pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part		pclid_info_read.part
+#define pclid_read_way		pclid_info_read.way
+#define pclid_read_level	pclid_info_read.level
+#define pclid_read_cache_type	pclid_info_read.cache_type
+
+#define pclid_write_trigger	pclid_info_write.trigger
+#define pclid_write_length	pclid_info_write.length
+#define pclid_write_start	pclid_info_write.start
+#define pclid_write_mesi	pclid_info_write.mesi
+#define pclid_write_part	pclid_info_write.part
+#define pclid_write_way		pclid_info_write.way
+#define pclid_write_level	pclid_info_write.level
+#define pclid_write_cache_type	pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA		0	/* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG		1	/* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT	2	/* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT		3	/* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT	4	/* Data+tag
+							 * protection
+							 */
+typedef struct pal_cache_line_info_s {
+	pal_status_t		pcli_status;		/* Return status of the read cache line
+							 * info call.
+							 */
+	u64			pcli_data;		/* 64-bit data, tag, protection bits .. */
+	u64			pcli_data_len;		/* data length in bits */
+	pal_cache_line_state_t	pcli_cache_line_state;	/* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine Check related crap */
+
+/* Pending event status bits  */
+typedef u64					pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA			(1 << 0)
+#define PAL_MC_PENDING_INIT			(1 << 1)
+
+/* Error information type */
+typedef u64					pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR			0	/* Processor */
+#define PAL_MC_INFO_CACHE_CHECK			1	/* Cache check */
+#define PAL_MC_INFO_TLB_CHECK			2	/* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK			3	/* Bus check */
+#define PAL_MC_INFO_REQ_ADDR			4	/* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR			5	/* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR			6	/* Target address */
+#define PAL_MC_INFO_IMPL_DEP			7	/* Implementation
+							 * dependent
+							 */
+
+#define PAL_TLB_CHECK_OP_PURGE			8
+
+typedef struct pal_process_state_info_s {
+	u64		reserved1	: 2,
+			rz		: 1,	/* PAL_CHECK processor
+						 * rendezvous
+						 * successful.
+						 */
+
+			ra		: 1,	/* PAL_CHECK attempted
+						 * a rendezvous.
+						 */
+			me		: 1,	/* Distinct multiple
+						 * errors occurred
+						 */
+
+			mn		: 1,	/* Min. state save
+						 * area has been
+						 * registered with PAL
+						 */
+
+			sy		: 1,	/* Storage integrity
+						 * synched
+						 */
+
+
+			co		: 1,	/* Continuable */
+			ci		: 1,	/* MC isolated */
+			us		: 1,	/* Uncontained storage
+						 * damage.
+						 */
+
+
+			hd		: 1,	/* Non-essential hw
+						 * lost (no loss of
+						 * functionality)
+						 * causing the
+						 * processor to run in
+						 * degraded mode.
+						 */
+
+			tl		: 1,	/* 1 => MC occurred
+						 * after an instr was
+						 * executed but before
+						 * the trap that
+						 * resulted from instr
+						 * execution was
+						 * generated.
+						 * (Trap Lost )
+						 */
+			mi		: 1,	/* More information available
+						 * call PAL_MC_ERROR_INFO
+						 */
+			pi		: 1,	/* Precise instruction pointer */
+			pm		: 1,	/* Precise min-state save area */
+
+			dy		: 1,	/* Processor dynamic
+						 * state valid
+						 */
+
+
+			in		: 1,	/* 0 = MC, 1 = INIT */
+			rs		: 1,	/* RSE valid */
+			cm		: 1,	/* MC corrected */
+			ex		: 1,	/* MC is expected */
+			cr		: 1,	/* Control regs valid*/
+			pc		: 1,	/* Perf cntrs valid */
+			dr		: 1,	/* Debug regs valid */
+			tr		: 1,	/* Translation regs
+						 * valid
+						 */
+			rr		: 1,	/* Region regs valid */
+			ar		: 1,	/* App regs valid */
+			br		: 1,	/* Branch regs valid */
+			pr		: 1,	/* Predicate registers
+						 * valid
+						 */
+
+			fp		: 1,	/* fp registers valid*/
+			b1		: 1,	/* Preserved bank one
+						 * general registers
+						 * are valid
+						 */
+			b0		: 1,	/* Preserved bank zero
+						 * general registers
+						 * are valid
+						 */
+			gr		: 1,	/* General registers
+						 * are valid
+						 * (excl. banked regs)
+						 */
+			dsize		: 16,	/* size of dynamic
+						 * state returned
+						 * by the processor
+						 */
+
+			se		: 1,	/* Shared error.  MCA in a
+						   shared structure */
+			reserved2	: 10,
+			cc		: 1,	/* Cache check */
+			tc		: 1,	/* TLB check */
+			bc		: 1,	/* Bus check */
+			rc		: 1,	/* Register file check */
+			uc		: 1;	/* Uarch check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+	u64		op		: 4,	/* Type of cache
+						 * operation that
+						 * caused the machine
+						 * check.
+						 */
+			level		: 2,	/* Cache level */
+			reserved1	: 2,
+			dl		: 1,	/* Failure in data part
+						 * of cache line
+						 */
+			tl		: 1,	/* Failure in tag part
+						 * of cache line
+						 */
+			dc		: 1,	/* Failure in dcache */
+			ic		: 1,	/* Failure in icache */
+			mesi		: 3,	/* Cache line state */
+			mv		: 1,	/* mesi valid */
+			way		: 5,	/* Way in which the
+						 * error occurred
+						 */
+			wiv		: 1,	/* Way field valid */
+			reserved2	: 1,
+			dp		: 1,	/* Data poisoned on MBE */
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
+
+			index		: 20,	/* Cache line index */
+			reserved4	: 2,
+
+			is		: 1,	/* instruction set (1 == ia32) */
+			iv		: 1,	/* instruction set field valid */
+			pl		: 2,	/* privilege level */
+			pv		: 1,	/* privilege level field valid */
+			mcc		: 1,	/* Machine check corrected */
+			tv		: 1,	/* Target address
+						 * structure is valid
+						 */
+			rq		: 1,	/* Requester identifier
+						 * structure is valid
+						 */
+			rp		: 1,	/* Responder identifier
+						 * structure is valid
+						 */
+			pi		: 1;	/* Precise instruction pointer
+						 * structure is valid
+						 */
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+	u64		tr_slot		: 8,	/* Slot# of TR where
+						 * error occurred
+						 */
+			trv		: 1,	/* tr_slot field is valid */
+			reserved1	: 1,
+			level		: 2,	/* TLB level where failure occurred */
+			reserved2	: 4,
+			dtr		: 1,	/* Fail in data TR */
+			itr		: 1,	/* Fail in inst TR */
+			dtc		: 1,	/* Fail in data TC */
+			itc		: 1,	/* Fail in inst. TC */
+			op		: 4,	/* Cache operation */
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
+			reserved4	: 22,
+
+			is		: 1,	/* instruction set (1 == ia32) */
+			iv		: 1,	/* instruction set field valid */
+			pl		: 2,	/* privilege level */
+			pv		: 1,	/* privilege level field valid */
+			mcc		: 1,	/* Machine check corrected */
+			tv		: 1,	/* Target address
+						 * structure is valid
+						 */
+			rq		: 1,	/* Requester identifier
+						 * structure is valid
+						 */
+			rp		: 1,	/* Responder identifier
+						 * structure is valid
+						 */
+			pi		: 1;	/* Precise instruction pointer
+						 * structure is valid
+						 */
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+	u64		size		: 5,	/* Xaction size */
+			ib		: 1,	/* Internal bus error */
+			eb		: 1,	/* External bus error */
+			cc		: 1,	/* Error occurred
+						 * during cache-cache
+						 * transfer.
+						 */
+			type		: 8,	/* Bus xaction type*/
+			sev		: 5,	/* Bus error severity*/
+			hier		: 2,	/* Bus hierarchy level */
+			dp		: 1,	/* Data poisoned on MBE */
+			bsi		: 8,	/* Bus error status
+						 * info
+						 */
+			reserved2	: 22,
+
+			is		: 1,	/* instruction set (1 == ia32) */
+			iv		: 1,	/* instruction set field valid */
+			pl		: 2,	/* privilege level */
+			pv		: 1,	/* privilege level field valid */
+			mcc		: 1,	/* Machine check corrected */
+			tv		: 1,	/* Target address
+						 * structure is valid
+						 */
+			rq		: 1,	/* Requester identifier
+						 * structure is valid
+						 */
+			rp		: 1,	/* Responder identifier
+						 * structure is valid
+						 */
+			pi		: 1;	/* Precise instruction pointer
+						 * structure is valid
+						 */
+} pal_bus_check_info_t;
+
+typedef struct pal_reg_file_check_info_s {
+	u64		id		: 4,	/* Register file identifier */
+			op		: 4,	/* Type of register
+						 * operation that
+						 * caused the machine
+						 * check.
+						 */
+			reg_num		: 7,	/* Register number */
+			rnv		: 1,	/* reg_num valid */
+			reserved2	: 38,
+
+			is		: 1,	/* instruction set (1 == ia32) */
+			iv		: 1,	/* instruction set field valid */
+			pl		: 2,	/* privilege level */
+			pv		: 1,	/* privilege level field valid */
+			mcc		: 1,	/* Machine check corrected */
+			reserved3	: 3,
+			pi		: 1;	/* Precise instruction pointer
+						 * structure is valid
+						 */
+} pal_reg_file_check_info_t;
+
+typedef struct pal_uarch_check_info_s {
+	u64		sid		: 5,	/* Structure identification */
+			level		: 3,	/* Level of failure */
+			array_id	: 4,	/* Array identification */
+			op		: 4,	/* Type of
+						 * operation that
+						 * caused the machine
+						 * check.
+						 */
+			way		: 6,	/* Way of structure */
+			wv		: 1,	/* way valid */
+			xv		: 1,	/* index valid */
+			reserved1	: 6,
+			hlth		: 2,	/* Health indicator */
+			index		: 8,	/* Index or set of the uarch
+						 * structure that failed.
+						 */
+			reserved2	: 24,
+
+			is		: 1,	/* instruction set (1 == ia32) */
+			iv		: 1,	/* instruction set field valid */
+			pl		: 2,	/* privilege level */
+			pv		: 1,	/* privilege level field valid */
+			mcc		: 1,	/* Machine check corrected */
+			tv		: 1,	/* Target address
+						 * structure is valid
+						 */
+			rq		: 1,	/* Requester identifier
+						 * structure is valid
+						 */
+			rp		: 1,	/* Responder identifier
+						 * structure is valid
+						 */
+			pi		: 1;	/* Precise instruction pointer
+						 * structure is valid
+						 */
+} pal_uarch_check_info_t;
+
+typedef union pal_mc_error_info_u {
+	u64				pmei_data;
+	pal_processor_state_info_t	pme_processor;
+	pal_cache_check_info_t		pme_cache;
+	pal_tlb_check_info_t		pme_tlb;
+	pal_bus_check_info_t		pme_bus;
+	pal_reg_file_check_info_t	pme_reg_file;
+	pal_uarch_check_info_t		pme_uarch;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check			pme_processor.uc
+#define pmci_proc_bus_check			pme_processor.bc
+#define pmci_proc_tlb_check			pme_processor.tc
+#define pmci_proc_cache_check			pme_processor.cc
+#define pmci_proc_dynamic_state_size		pme_processor.dsize
+#define pmci_proc_gpr_valid			pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid	pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid	pme_processor.b1
+#define pmci_proc_fp_valid			pme_processor.fp
+#define pmci_proc_predicate_regs_valid		pme_processor.pr
+#define pmci_proc_branch_regs_valid		pme_processor.br
+#define pmci_proc_app_regs_valid		pme_processor.ar
+#define pmci_proc_region_regs_valid		pme_processor.rr
+#define pmci_proc_translation_regs_valid	pme_processor.tr
+#define pmci_proc_debug_regs_valid		pme_processor.dr
+#define pmci_proc_perf_counters_valid		pme_processor.pc
+#define pmci_proc_control_regs_valid		pme_processor.cr
+#define pmci_proc_machine_check_expected	pme_processor.ex
+#define pmci_proc_machine_check_corrected	pme_processor.cm
+#define pmci_proc_rse_valid			pme_processor.rs
+#define pmci_proc_machine_check_or_init		pme_processor.in
+#define pmci_proc_dynamic_state_valid		pme_processor.dy
+#define pmci_proc_operation			pme_processor.op
+#define pmci_proc_trap_lost			pme_processor.tl
+#define pmci_proc_hardware_damage		pme_processor.hd
+#define pmci_proc_uncontained_storage_damage	pme_processor.us
+#define pmci_proc_machine_check_isolated	pme_processor.ci
+#define pmci_proc_continuable			pme_processor.co
+#define pmci_proc_storage_intergrity_synced	pme_processor.sy
+#define pmci_proc_min_state_save_area_regd	pme_processor.mn
+#define	pmci_proc_distinct_multiple_errors	pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous	pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete	pme_processor.rz
+
+
+#define pmci_cache_level			pme_cache.level
+#define pmci_cache_line_state			pme_cache.mesi
+#define pmci_cache_line_state_valid		pme_cache.mv
+#define pmci_cache_line_index			pme_cache.index
+#define pmci_cache_instr_cache_fail		pme_cache.ic
+#define pmci_cache_data_cache_fail		pme_cache.dc
+#define pmci_cache_line_tag_fail		pme_cache.tl
+#define pmci_cache_line_data_fail		pme_cache.dl
+#define pmci_cache_operation			pme_cache.op
+#define pmci_cache_way_valid			pme_cache.wv
+#define pmci_cache_target_address_valid		pme_cache.tv
+#define pmci_cache_way				pme_cache.way
+#define pmci_cache_mc				pme_cache.mcc
+
+#define pmci_tlb_instr_translation_cache_fail	pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail	pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail	pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail	pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot		pme_tlb.tr_slot
+#define pmci_tlb_mc				pme_tlb.mcc
+
+#define pmci_bus_status_info			pme_bus.bsi
+#define pmci_bus_req_address_valid		pme_bus.rq
+#define pmci_bus_resp_address_valid		pme_bus.rp
+#define pmci_bus_target_address_valid		pme_bus.tv
+#define pmci_bus_error_severity			pme_bus.sev
+#define pmci_bus_transaction_type		pme_bus.type
+#define pmci_bus_cache_cache_transfer		pme_bus.cc
+#define pmci_bus_transaction_size		pme_bus.size
+#define pmci_bus_internal_error			pme_bus.ib
+#define pmci_bus_external_error			pme_bus.eb
+#define pmci_bus_mc				pme_bus.mcc
+
+/*
+ * NOTE: this min_state_save area struct only includes the 1KB
+ * architectural state save area.  The other 3 KB is scratch space
+ * for PAL.
+ */
+
+typedef struct pal_min_state_area_s {
+	u64	pmsa_nat_bits;		/* nat bits for saved GRs  */
+	u64	pmsa_gr[15];		/* GR1	- GR15		   */
+	u64	pmsa_bank0_gr[16];	/* GR16 - GR31		   */
+	u64	pmsa_bank1_gr[16];	/* GR16 - GR31		   */
+	u64	pmsa_pr;		/* predicate registers	   */
+	u64	pmsa_br0;		/* branch register 0	   */
+	u64	pmsa_rsc;		/* ar.rsc		   */
+	u64	pmsa_iip;		/* cr.iip		   */
+	u64	pmsa_ipsr;		/* cr.ipsr		   */
+	u64	pmsa_ifs;		/* cr.ifs		   */
+	u64	pmsa_xip;		/* previous iip		   */
+	u64	pmsa_xpsr;		/* previous psr		   */
+	u64	pmsa_xfs;		/* previous ifs		   */
+	u64	pmsa_br1;		/* branch register 1	   */
+	u64	pmsa_reserved[70];	/* pal_min_state_area should total to 1KB */
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+	/*
+	 * A zero status value indicates call completed without error.
+	 * A negative status value indicates reason of call failure.
+	 * A positive status value indicates success but an
+	 * informational value should be printed (e.g., "reboot for
+	 * change to take effect").
+	 */
+	s64 status;
+	u64 v0;
+	u64 v1;
+	u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed.  Reserved parameters are not optional
+ * parameters.
+ */
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
+extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
+
+#define PAL_CALL(iprv,a0,a1,a2,a3) do {			\
+	struct ia64_fpreg fr[6];			\
+	ia64_save_scratch_fpregs(fr);			\
+	iprv = ia64_pal_call_static(a0, a1, a2, a3);	\
+	ia64_load_scratch_fpregs(fr);			\
+} while (0)
+
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do {		\
+	struct ia64_fpreg fr[6];			\
+	ia64_save_scratch_fpregs(fr);			\
+	iprv = ia64_pal_call_stacked(a0, a1, a2, a3);	\
+	ia64_load_scratch_fpregs(fr);			\
+} while (0)
+
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do {			\
+	struct ia64_fpreg fr[6];				\
+	ia64_save_scratch_fpregs(fr);				\
+	iprv = ia64_pal_call_phys_static(a0, a1, a2, a3);	\
+	ia64_load_scratch_fpregs(fr);				\
+} while (0)
+
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do {		\
+	struct ia64_fpreg fr[6];				\
+	ia64_save_scratch_fpregs(fr);				\
+	iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3);	\
+	ia64_load_scratch_fpregs(fr);				\
+} while (0)
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern pal_cache_config_info_t		l0d_cache_config_info;
+extern pal_cache_config_info_t		l0i_cache_config_info;
+extern pal_cache_config_info_t		l1_cache_config_info;
+extern pal_cache_config_info_t		l2_cache_config_info;
+
+extern pal_cache_protection_info_t	l0d_cache_protection_info;
+extern pal_cache_protection_info_t	l0i_cache_protection_info;
+extern pal_cache_protection_info_t	l1_cache_protection_info;
+extern pal_cache_protection_info_t	l2_cache_protection_info;
+
+extern pal_cache_config_info_t		pal_cache_config_info_get(pal_cache_level_t,
+								  pal_cache_type_t);
+
+extern pal_cache_protection_info_t	pal_cache_protection_info_get(pal_cache_level_t,
+								      pal_cache_type_t);
+
+
+extern void				pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+	u64	pal_bus_features_val;
+	struct {
+		u64	pbf_reserved1				:	29;
+		u64	pbf_req_bus_parking			:	1;
+		u64	pbf_bus_lock_mask			:	1;
+		u64	pbf_enable_half_xfer_rate		:	1;
+		u64	pbf_reserved2				:	20;
+		u64	pbf_enable_shared_line_replace		:	1;
+		u64	pbf_enable_exclusive_line_replace	:	1;
+		u64	pbf_disable_xaction_queueing		:	1;
+		u64	pbf_disable_resp_err_check		:	1;
+		u64	pbf_disable_berr_check			:	1;
+		u64	pbf_disable_bus_req_internal_err_signal	:	1;
+		u64	pbf_disable_bus_req_berr_signal		:	1;
+		u64	pbf_disable_bus_init_event_check	:	1;
+		u64	pbf_disable_bus_init_event_signal	:	1;
+		u64	pbf_disable_bus_addr_err_check		:	1;
+		u64	pbf_disable_bus_addr_err_signal		:	1;
+		u64	pbf_disable_bus_data_err_check		:	1;
+	} pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+static inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+			   pal_bus_features_u_t *features_status,
+			   pal_bus_features_u_t *features_control)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+	if (features_avail)
+		features_avail->pal_bus_features_val = iprv.v0;
+	if (features_status)
+		features_status->pal_bus_features_val = iprv.v1;
+	if (features_control)
+		features_control->pal_bus_features_val = iprv.v2;
+	return iprv.status;
+}
+
+/* Enables/disables specific processor bus features */
+static inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+	return iprv.status;
+}
+
+/* Get detailed cache information */
+static inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+	if (iprv.status == 0) {
+		conf->pcci_status                 = iprv.status;
+		conf->pcci_info_1.pcci1_data      = iprv.v0;
+		conf->pcci_info_2.pcci2_data      = iprv.v1;
+		conf->pcci_reserved               = iprv.v2;
+	}
+	return iprv.status;
+
+}
+
+/* Get detailed cache protection information */
+static inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+	if (iprv.status == 0) {
+		prot->pcpi_status           = iprv.status;
+		prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+		prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+		prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+		prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+		prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+		prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+	}
+	return iprv.status;
+}
+
+/*
+ * Flush the processor instruction or data caches.  *PROGRESS must be
+ * initialized to zero before calling this for the first time.
+ */
+static inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+	if (vector)
+		*vector = iprv.v0;
+	*progress = iprv.v1;
+	return iprv.status;
+}
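+
+/*
+ * Illustrative call pattern (hypothetical locals): progress starts at
+ * zero and, if PAL returns before the flush completes, the updated
+ * value is passed back in on the next call to resume.
+ *
+ *	u64 vector, progress = 0;
+ *	s64 status = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
+ *					  PAL_CACHE_FLUSH_INVALIDATE,
+ *					  &progress, &vector);
+ */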
+
+
+/* Initialize the processor controlled caches */
+static inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
+	return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+static inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+	return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+static inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
+				physical_addr, 0);
+	return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+static inline long ia64_pal_cache_summary(unsigned long *cache_levels,
+						unsigned long *unique_caches)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+	if (cache_levels)
+		*cache_levels = iprv.v0;
+	if (unique_caches)
+		*unique_caches = iprv.v1;
+	return iprv.status;
+}
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+static inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
+				physical_addr, data);
+	return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+		    u64 *buffer_size, u64 *buffer_align)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+	if (buffer_size)
+		*buffer_size = iprv.v0;
+	if (buffer_align)
+		*buffer_align = iprv.v1;
+	return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+	if (pal_proc_offset)
+		*pal_proc_offset = iprv.v0;
+	return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+static inline long ia64_pal_debug_info(unsigned long *inst_regs,
+						unsigned long *data_regs)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+	if (inst_regs)
+		*inst_regs = iprv.v0;
+	if (data_regs)
+		*data_regs = iprv.v1;
+
+	return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+static inline s64
+ia64_pal_enter_ia32_env (u64 ia32_env1, u64 ia32_env2, u64 ia32_env3)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+	return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+static inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+	if (global_unique_addr)
+		*global_unique_addr = iprv.v0;
+	return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+	if (platform_base_freq)
+		*platform_base_freq = iprv.v0;
+	return iprv.status;
+}
+
+/*
+ * Get the ratios of the processor frequency, bus frequency and interval
+ * timer to the base frequency of the platform
+ */
+static inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+		      struct pal_freq_ratio *itc_ratio)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+	if (proc_ratio)
+		*(u64 *)proc_ratio = iprv.v0;
+	if (bus_ratio)
+		*(u64 *)bus_ratio = iprv.v1;
+	if (itc_ratio)
+		*(u64 *)itc_ratio = iprv.v2;
+	return iprv.status;
+}
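+
+/*
+ * The ratios are fractions of the platform base frequency returned by
+ * ia64_pal_freq_base(), so absolute rates fall out of a multiply/divide.
+ * Sketch (num/den are the bit-fields of struct pal_freq_ratio defined
+ * earlier in this file):
+ *
+ *	unsigned long base_freq;
+ *	struct pal_freq_ratio proc, bus, itc;
+ *
+ *	if (ia64_pal_freq_base(&base_freq) == 0 &&
+ *	    ia64_pal_freq_ratios(&proc, &bus, &itc) == 0) {
+ *		unsigned long itc_freq = (base_freq * itc.num) / itc.den;
+ *		unsigned long cpu_freq = (base_freq * proc.num) / proc.den;
+ *	}
+ */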
+
+/*
+ * Get the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
+			u64 *la)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
+	if (cur_policy)
+		*cur_policy = iprv.v0;
+	if (num_impacted)
+		*num_impacted = iprv.v1;
+	if (la)
+		*la = iprv.v2;
+	return iprv.status;
+}
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+static inline s64
+ia64_pal_halt (u64 halt_state)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+	return iprv.status;
+}
+
+typedef union pal_power_mgmt_info_u {
+	u64			ppmi_data;
+	struct {
+	       u64		exit_latency		: 16,
+				entry_latency		: 16,
+				power_consumption	: 28,
+				im			: 1,
+				co			: 1,
+				reserved		: 2;
+	} pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about the processor's optional power management capabilities. */
+static inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+	return iprv.status;
+}
+
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
+	*pstate_index = iprv.v0;
+	return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+	return iprv.status;
+}
+
+/* Processor branding information */
+static inline s64
+ia64_pal_get_brand_info (char *brand_info)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
+	return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+static inline s64
+ia64_pal_halt_light (void)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+	return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+static inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+	if (pending_vector)
+		*pending_vector = iprv.v0;
+	return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+static inline s64
+ia64_pal_mc_drain (void)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+	return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+static inline s64
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
+	if (size)
+		*size = iprv.v0;
+	return iprv.status;
+}
+
+/* Return processor machine check information */
+static inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+	if (size)
+		*size = iprv.v0;
+	if (error_info)
+		*error_info = iprv.v1;
+	return iprv.status;
+}
+
+/* Injects the requested processor error or returns info on
+ * supported injection capabilities for the current processor implementation.
+ */
+static inline s64
+ia64_pal_mc_error_inject_phys (u64 err_type_info, u64 err_struct_info,
+			u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+			  err_struct_info, err_data_buffer);
+	if (capabilities)
+		*capabilities = iprv.v0;
+	if (resources)
+		*resources = iprv.v1;
+	return iprv.status;
+}
+
+static inline s64
+ia64_pal_mc_error_inject_virt (u64 err_type_info, u64 err_struct_info,
+			u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+			  err_struct_info, err_data_buffer);
+	if (capabilities)
+		*capabilities = iprv.v0;
+	if (resources)
+		*resources = iprv.v1;
+	return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+static inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+	if (previous)
+		*previous = iprv.v0;
+	return iprv.status;
+}
+
+typedef union pal_hw_tracking_u {
+	u64			pht_data;
+	struct {
+		u64		itc	:4,	/* Instruction cache tracking */
+				dct	:4,	/* Data cache tracking */
+				itt	:4,	/* Instruction TLB tracking */
+				ddt	:4,	/* Data TLB tracking */
+				reserved:48;
+	} pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+	if (status)
+		*status = iprv.v0;
+	return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+static inline s64
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+	if (req_size)
+		*req_size = iprv.v0;
+	return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+static inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+	return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+static inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+	if (mem_attrib)
+		*mem_attrib = iprv.v0 & 0xff;
+	return iprv.status;
+}
+
+/* Return the amount of memory needed for second phase of processor
+ * self-test and the required alignment of memory.
+ */
+static inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+	if (bytes_needed)
+		*bytes_needed = iprv.v0;
+	if (alignment)
+		*alignment = iprv.v1;
+	return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+	u64			  ppmi_data;
+	struct {
+	       u64		generic		: 8,
+				width		: 8,
+				cycles		: 8,
+				retired		: 8,
+				reserved	: 32;
+	} pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+static inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+	if (pm_info)
+		pm_info->ppmi_data = iprv.v0;
+	return iprv.status;
+}
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+static inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+	return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+static inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+	return iprv.status;
+}
+
+struct pal_features_s;
+/* Provide information about configurable processor features */
+static inline s64
+ia64_pal_proc_get_features (u64 *features_avail,
+			    u64 *features_status,
+			    u64 *features_control,
+			    u64 features_set)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, features_set, 0);
+	if (iprv.status == 0) {
+		*features_avail   = iprv.v0;
+		*features_status  = iprv.v1;
+		*features_control = iprv.v2;
+	}
+	return iprv.status;
+}
+
+/* Enable/disable processor dependent features */
+static inline s64
+ia64_pal_proc_set_features (u64 feature_select)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+	return iprv.status;
+}
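+
+/*
+ * Typical get/modify/set sequence (sketch; "bit" is a caller-supplied
+ * feature mask used purely for illustration, and per-feature enable
+ * semantics are defined by the PAL spec):
+ *
+ *	u64 avail, status, control;
+ *
+ *	if (ia64_pal_proc_get_features(&avail, &status, &control, 0) == 0 &&
+ *	    (avail & bit) && !(status & bit))
+ *		ia64_pal_proc_set_features(control | bit);
+ */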
+
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+	unsigned long	base;
+	u32		count[2];
+	u32		stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+static inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+	struct ia64_pal_retval iprv;
+
+	if (!ptce)
+		return -1;
+
+	PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+	if (iprv.status == 0) {
+		ptce->base = iprv.v0;
+		ptce->count[0] = iprv.v1 >> 32;
+		ptce->count[1] = iprv.v1 & 0xffffffff;
+		ptce->stride[0] = iprv.v2 >> 32;
+		ptce->stride[1] = iprv.v2 & 0xffffffff;
+	}
+	return iprv.status;
+}
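+
+/*
+ * The returned values parameterize the architected purge loop: a doubly
+ * nested walk issuing one ptc.e per step (sketch, shaped like the kernel's
+ * local TLB flush; ia64_ptce() is the ptc.e intrinsic from <asm/intrinsics.h>):
+ *
+ *	ia64_ptce_info_t ptce;
+ *	unsigned long addr;
+ *	int i, j;
+ *
+ *	if (ia64_get_ptce(&ptce) == 0) {
+ *		addr = ptce.base;
+ *		for (i = 0; i < ptce.count[0]; ++i) {
+ *			for (j = 0; j < ptce.count[1]; ++j) {
+ *				ia64_ptce(addr);
+ *				addr += ptce.stride[1];
+ *			}
+ *			addr += ptce.stride[0];
+ *		}
+ *	}
+ */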
+
+/* Return info about implemented application and control registers. */
+static inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+	if (reg_info_1)
+		*reg_info_1 = iprv.v0;
+	if (reg_info_2)
+		*reg_info_2 = iprv.v1;
+	return iprv.status;
+}
+
+typedef union pal_hints_u {
+	unsigned long		ph_data;
+	struct {
+	       unsigned long	si		: 1,
+				li		: 1,
+				reserved	: 62;
+	} pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+static inline long ia64_pal_rse_info(unsigned long *num_phys_stacked,
+							pal_hints_u_t *hints)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+	if (num_phys_stacked)
+		*num_phys_stacked = iprv.v0;
+	if (hints)
+		hints->ph_data = iprv.v1;
+	return iprv.status;
+}
+
+/*
+ * Set the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_set_hw_policy (u64 policy)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
+	return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ * This is usually called in IA-32 mode.
+ */
+static inline s64
+ia64_pal_shutdown (void)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+	return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+static inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+	if (self_test_state)
+		*self_test_state = iprv.v0;
+	return iprv.status;
+}
+
+typedef union  pal_version_u {
+	u64	pal_version_val;
+	struct {
+		u64	pv_pal_b_rev		:	8;
+		u64	pv_pal_b_model		:	8;
+		u64	pv_reserved1		:	8;
+		u64	pv_pal_vendor		:	8;
+		u64	pv_pal_a_rev		:	8;
+		u64	pv_pal_a_model		:	8;
+		u64	pv_reserved2		:	16;
+	} pal_version_s;
+} pal_version_u_t;
+
+
+/*
+ * Return PAL version information.  While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls.  We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
+static inline s64
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+	if (pal_min_version)
+		pal_min_version->pal_version_val = iprv.v0;
+
+	if (pal_cur_version)
+		pal_cur_version->pal_version_val = iprv.v1;
+
+	return iprv.status;
+}
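+
+/*
+ * The two returned words decode through pal_version_u_t, e.g. to report
+ * the running PAL_A/PAL_B revisions at boot (formatting illustrative):
+ *
+ *	pal_version_u_t min_ver, cur_ver;
+ *
+ *	if (ia64_pal_version(&min_ver, &cur_ver) == 0)
+ *		printk(KERN_INFO "PAL_A %x/%x PAL_B %x/%x vendor %u\n",
+ *		       cur_ver.pal_version_s.pv_pal_a_model,
+ *		       cur_ver.pal_version_s.pv_pal_a_rev,
+ *		       cur_ver.pal_version_s.pv_pal_b_model,
+ *		       cur_ver.pal_version_s.pv_pal_b_rev,
+ *		       cur_ver.pal_version_s.pv_pal_vendor);
+ */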
+
+typedef union pal_tc_info_u {
+	u64			pti_val;
+	struct {
+	       u64		num_sets	:	8,
+				associativity	:	8,
+				num_entries	:	16,
+				pf		:	1,
+				unified		:	1,
+				reduce_tr	:	1,
+				reserved	:	29;
+	} pal_tc_info_s;
+} pal_tc_info_u_t;
+
+#define tc_reduce_tr		pal_tc_info_s.reduce_tr
+#define tc_unified		pal_tc_info_s.unified
+#define tc_pf			pal_tc_info_s.pf
+#define tc_num_entries		pal_tc_info_s.num_entries
+#define tc_associativity	pal_tc_info_s.associativity
+#define tc_num_sets		pal_tc_info_s.num_sets
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type,  pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+	if (tc_info)
+		tc_info->pti_val = iprv.v0;
+	if (tc_pages)
+		*tc_pages = iprv.v1;
+	return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64 ia64_pal_vm_page_size(u64 *tr_pages, u64 *vw_pages)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+	if (tr_pages)
+		*tr_pages = iprv.v0;
+	if (vw_pages)
+		*vw_pages = iprv.v1;
+	return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+	u64			pvi1_val;
+	struct {
+		u64		vw		: 1,
+				phys_add_size	: 7,
+				key_size	: 8,
+				max_pkr		: 8,
+				hash_tag_id	: 8,
+				max_dtr_entry	: 8,
+				max_itr_entry	: 8,
+				max_unique_tcs	: 8,
+				num_tc_levels	: 8;
+	} pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+#define PAL_MAX_PURGES		0xFFFF		/* all ones means unlimited */
+
+typedef union pal_vm_info_2_u {
+	u64			pvi2_val;
+	struct {
+		u64		impl_va_msb	: 8,
+				rid_size	: 8,
+				max_purges	: 16,
+				reserved	: 32;
+	} pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+	if (vm_info_1)
+		vm_info_1->pvi1_val = iprv.v0;
+	if (vm_info_2)
+		vm_info_2->pvi2_val = iprv.v1;
+	return iprv.status;
+}
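+
+/*
+ * Boot code can derive the implemented virtual address width from these
+ * fields, e.g. (sketch; impl_va_msb is the index of the most significant
+ * implemented VA bit, per the union above):
+ *
+ *	pal_vm_info_1_u_t vmi1;
+ *	pal_vm_info_2_u_t vmi2;
+ *
+ *	if (ia64_pal_vm_summary(&vmi1, &vmi2) == 0) {
+ *		int impl_va_bits = vmi2.pal_vm_info_2_s.impl_va_msb + 1;
+ *		int rid_bits     = vmi2.pal_vm_info_2_s.rid_size;
+ *	}
+ */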
+
+typedef union pal_vp_info_u {
+	u64			pvi_val;
+	struct {
+		u64		index:		48,	/* virtual feature set info */
+				vmm_id:		16;	/* feature set id */
+	} pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+	if (vp_info)
+		*vp_info = iprv.v0;
+	if (vmm_id)
+		*vmm_id = iprv.v1;
+	return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+	u64			piv_val;
+	struct {
+	       u64		access_rights_valid	: 1,
+				priv_level_valid	: 1,
+				dirty_bit_valid		: 1,
+				mem_attr_valid		: 1,
+				reserved		: 60;
+	} pal_tr_valid_s;
+} pal_tr_valid_u_t;
+
+/* Read a translation register */
+static inline s64
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type, (u64)ia64_tpa(tr_buffer));
+	if (tr_valid)
+		tr_valid->piv_val = iprv.v0;
+	return iprv.status;
+}
+
+/*
+ * PAL_PREFETCH_VISIBILITY transaction types
+ */
+#define PAL_VISIBILITY_VIRTUAL		0
+#define PAL_VISIBILITY_PHYSICAL		1
+
+/*
+ * PAL_PREFETCH_VISIBILITY return codes
+ */
+#define PAL_VISIBILITY_OK		1
+#define PAL_VISIBILITY_OK_REMOTE_NEEDED	0
+#define PAL_VISIBILITY_INVAL_ARG	-2
+#define PAL_VISIBILITY_ERROR		-3
+
+static inline s64
+ia64_pal_prefetch_visibility (s64 trans_type)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
+	return iprv.status;
+}
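+
+/*
+ * The return code tells the caller whether the transition is purely local.
+ * Sketch (the cross-CPU notification is illustrative):
+ *
+ *	s64 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
+ *
+ *	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+ *		every other CPU must make the same PAL call,
+ *		e.g. via smp_call_function()
+ *	} else if (status < 0) {
+ *		PAL_VISIBILITY_INVAL_ARG or PAL_VISIBILITY_ERROR
+ *	}
+ */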
+
+/* data structure for getting information on logical to physical mappings */
+typedef union pal_log_overview_u {
+	struct {
+		u64	num_log		:16,	/* Total number of logical
+						 * processors on this die
+						 */
+			tpc		:8,	/* Threads per core */
+			reserved3	:8,	/* Reserved */
+			cpp		:8,	/* Cores per processor */
+			reserved2	:8,	/* Reserved */
+			ppid		:8,	/* Physical processor ID */
+			reserved1	:8;	/* Reserved */
+	} overview_bits;
+	u64 overview_data;
+} pal_log_overview_t;
+
+typedef union pal_proc_n_log_info1_u{
+	struct {
+		u64	tid		:16,	/* Thread id */
+			reserved2	:16,	/* Reserved */
+			cid		:16,	/* Core id */
+			reserved1	:16;	/* Reserved */
+	} ppli1_bits;
+	u64	ppli1_data;
+} pal_proc_n_log_info1_t;
+
+typedef union pal_proc_n_log_info2_u {
+	struct {
+		u64	la		:16,	/* Logical address */
+			reserved	:48;	/* Reserved */
+	} ppli2_bits;
+	u64	ppli2_data;
+} pal_proc_n_log_info2_t;
+
+typedef struct pal_logical_to_physical_s
+{
+	pal_log_overview_t overview;
+	pal_proc_n_log_info1_t ppli1;
+	pal_proc_n_log_info2_t ppli2;
+} pal_logical_to_physical_t;
+
+#define overview_num_log	overview.overview_bits.num_log
+#define overview_tpc		overview.overview_bits.tpc
+#define overview_cpp		overview.overview_bits.cpp
+#define overview_ppid		overview.overview_bits.ppid
+#define log1_tid		ppli1.ppli1_bits.tid
+#define log1_cid		ppli1.ppli1_bits.cid
+#define log2_la			ppli2.ppli2_bits.la
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
+
+	if (iprv.status == PAL_STATUS_SUCCESS)
+	{
+		mapping->overview.overview_data = iprv.v0;
+		mapping->ppli1.ppli1_data = iprv.v1;
+		mapping->ppli2.ppli2_data = iprv.v2;
+	}
+
+	return iprv.status;
+}
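+
+/*
+ * With the accessor macros above, topology discovery is a single call; the
+ * kernel's sibling-discovery code passes -1 to query the calling processor:
+ *
+ *	pal_logical_to_physical_t info;
+ *
+ *	if (ia64_pal_logical_to_phys(-1, &info) == PAL_STATUS_SUCCESS) {
+ *		unsigned int threads_per_core = info.overview_tpc;
+ *		unsigned int cores_per_die    = info.overview_cpp;
+ *		unsigned int core_id          = info.log1_cid;
+ *		unsigned int thread_id        = info.log1_tid;
+ *	}
+ */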
+
+typedef struct pal_cache_shared_info_s
+{
+	u64 num_shared;
+	pal_proc_n_log_info1_t ppli1;
+	pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information on which logical processors share a given cache. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+		u64 type,
+		u64 proc_number,
+		pal_cache_shared_info_t *info)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+	if (iprv.status == PAL_STATUS_SUCCESS) {
+		info->num_shared = iprv.v0;
+		info->ppli1.ppli1_data = iprv.v1;
+		info->ppli2.ppli2_data = iprv.v2;
+	}
+
+	return iprv.status;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
diff --git a/arch/ia64/include/asm/param.h b/arch/ia64/include/asm/param.h
new file mode 100644
index 0000000..1295913
--- /dev/null
+++ b/arch/ia64/include/asm/param.h
@@ -0,0 +1,17 @@
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_PARAM_H
+#define _ASM_IA64_PARAM_H
+
+#include <uapi/asm/param.h>
+
+# define HZ		CONFIG_HZ
+# define USER_HZ	HZ
+# define CLOCKS_PER_SEC	HZ	/* frequency at which times() counts */
+#endif /* _ASM_IA64_PARAM_H */
diff --git a/arch/ia64/include/asm/parport.h b/arch/ia64/include/asm/parport.h
new file mode 100644
index 0000000..638b4d2
--- /dev/null
+++ b/arch/ia64/include/asm/parport.h
@@ -0,0 +1,19 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_IA64_PARPORT_H
+#define _ASM_IA64_PARPORT_H 1
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+	return parport_pc_find_isa_ports(autoirq, autodma);
+}
+
+#endif /* _ASM_IA64_PARPORT_H */
diff --git a/arch/ia64/include/asm/patch.h b/arch/ia64/include/asm/patch.h
new file mode 100644
index 0000000..295fe6a
--- /dev/null
+++ b/arch/ia64/include/asm/patch.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_PATCH_H
+#define _ASM_IA64_PATCH_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * There are a number of reasons for patching instructions.  Rather than duplicating code
+ * all over the place, we put the common stuff here.  Reasons for patching: in-kernel
+ * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
+ * shared library.  Undoubtedly, some of these reasons will disappear and others will
+ * be added over time.
+ */
+#include <linux/elf.h>
+#include <linux/types.h>
+
+extern void ia64_patch (u64 insn_addr, u64 mask, u64 val);	/* patch any insn slot */
+extern void ia64_patch_imm64 (u64 insn_addr, u64 val);		/* patch "movl" w/abs. value*/
+extern void ia64_patch_imm60 (u64 insn_addr, u64 val);		/* patch "brl" w/ip-rel value */
+
+extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
+extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_phys_stack_reg(unsigned long val);
+extern void ia64_patch_rse (unsigned long start, unsigned long end);
+extern void ia64_patch_gate (void);
+
+#endif /* _ASM_IA64_PATCH_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
new file mode 100644
index 0000000..07039d1
--- /dev/null
+++ b/arch/ia64/include/asm/pci.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+
+struct pci_vector_struct {
+	__u16 segment;	/* PCI Segment number */
+	__u16 bus;	/* PCI Bus number */
+	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+	__u32 irq;	/* IRQ assigned */
+};
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
+ * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
+ * loader.
+ */
+#define pcibios_assign_all_busses()     0
+
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		0x10000000
+
+void pcibios_config_init(void);
+
+struct pci_dev;
+
+/*
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
+ * correspondence between device bus addresses and CPU physical addresses.
+ * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
+ * bounce buffer handling code in the block and network device layers.
+ * Platforms with separate bus address spaces _must_ turn this off and provide
+ * a device DMA mapping implementation that takes care of the necessary
+ * address translation.
+ *
+ * For now, the ia64 platforms which may have separate/multiple bus address
+ * spaces all have I/O MMUs which support the merging of physically
+ * discontiguous buffers, so we can use that as the sole factor to determine
+ * the setting of PCI_DMA_BUS_IS_PHYS.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
+
+#include <asm-generic/pci-dma-compat.h>
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
+				enum pci_mmap_state mmap_state, int write_combine);
+#define HAVE_PCI_LEGACY
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+
+#define pci_get_legacy_mem platform_pci_get_legacy_mem
+#define pci_legacy_read platform_pci_legacy_read
+#define pci_legacy_write platform_pci_legacy_write
+
+struct pci_controller {
+	struct acpi_device *companion;
+	void *iommu;
+	int segment;
+	int node;		/* nearest node with memory or NUMA_NO_NODE for global allocation */
+
+	void *platform_data;
+};
+
+
+#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
+#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
+
+extern struct pci_ops pci_root_ops;
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	return (pci_domain_nr(bus) != 0);
+}
+
+#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
+}
+
+#ifdef CONFIG_INTEL_IOMMU
+extern void pci_iommu_alloc(void);
+#endif
+#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
new file mode 100644
index 0000000..0ec484d
--- /dev/null
+++ b/arch/ia64/include/asm/percpu.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_IA64_PERCPU_H
+#define _ASM_IA64_PERCPU_H
+
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
+
+#ifdef __ASSEMBLY__
+# define THIS_CPU(var)	(var)  /* use this to mark accesses to per-CPU variables... */
+#else /* !__ASSEMBLY__ */
+
+
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
+#endif
+
+#define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)
+
+extern void *per_cpu_init(void);
+
+#else /* ! SMP */
+
+#define per_cpu_init()				(__phys_per_cpu_start)
+
+#endif	/* SMP */
+
+#define PER_CPU_BASE_SECTION ".data..percpu"
+
+/*
+ * Be extremely careful when taking the address of this variable!  Due to virtual
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var) (*({					\
+	__verify_pcpu_ptr(&(var));					\
+	((typeof(var) __kernel __force *)&(var));			\
+}))
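+
+/*
+ * Usage sketch with a hypothetical per-CPU variable (DEFINE_PER_CPU comes
+ * from <linux/percpu.h>); the value is accessed through the macro, while
+ * code that needs a pointer usable on other CPUs must stick to the generic
+ * per_cpu()/this_cpu_ptr() accessors:
+ *
+ *	DEFINE_PER_CPU(unsigned long, demo_counter);
+ *
+ *	__ia64_per_cpu_var(demo_counter)++;	(local CPU only)
+ */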
+
+#include <asm-generic/percpu.h>
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PERCPU_H */
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
new file mode 100644
index 0000000..15476dd
--- /dev/null
+++ b/arch/ia64/include/asm/perfmon.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *               Stephane Eranian <eranian@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_PERFMON_H
+#define _ASM_IA64_PERFMON_H
+
+#include <uapi/asm/perfmon.h>
+
+
+extern long perfmonctl(int fd, int cmd, void *arg, int narg);
+
+typedef struct {
+	void (*handler)(int irq, void *arg, struct pt_regs *regs);
+} pfm_intr_handler_desc_t;
+
+extern void pfm_save_regs (struct task_struct *);
+extern void pfm_load_regs (struct task_struct *);
+
+extern void pfm_exit_thread(struct task_struct *);
+extern int  pfm_use_debug_registers(struct task_struct *);
+extern int  pfm_release_debug_registers(struct task_struct *);
+extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
+extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
+extern void pfm_init_percpu(void);
+extern void pfm_handle_work(void);
+extern int  pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+extern int  pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+
+
+
+/*
+ * Reset PMD register flags
+ */
+#define PFM_PMD_SHORT_RESET	0
+#define PFM_PMD_LONG_RESET	1
+
+typedef union {
+	unsigned int val;
+	struct {
+		unsigned int notify_user:1;	/* notify user program of overflow */
+		unsigned int reset_ovfl_pmds:1;	/* reset overflowed PMDs */
+		unsigned int block_task:1;	/* block monitored task on kernel exit */
+		unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
+		unsigned int reserved:28;	/* for future use */
+	} bits;
+} pfm_ovfl_ctrl_t;
+
+typedef struct {
+	unsigned char	ovfl_pmd;			/* index of overflowed PMD  */
+	unsigned char   ovfl_notify;			/* =1 if monitor requested overflow notification */
+	unsigned short  active_set;			/* event set active at the time of the overflow */
+	pfm_ovfl_ctrl_t ovfl_ctrl;			/* return: perfmon controls to set by handler */
+
+	unsigned long   pmd_last_reset;			/* last reset value of the PMD */
+	unsigned long	smpl_pmds[4];			/* bitmask of other PMD of interest on overflow */
+	unsigned long   smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
+	unsigned long   pmd_value;			/* current 64-bit value of the PMD */
+	unsigned long	pmd_eventid;			/* eventid associated with PMD */
+} pfm_ovfl_arg_t;
+
+
+typedef struct {
+	char		*fmt_name;
+	pfm_uuid_t	fmt_uuid;
+	size_t		fmt_arg_size;
+	unsigned long	fmt_flags;
+
+	int		(*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
+	int		(*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
+	int 		(*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
+	int		(*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
+	int		(*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+	int		(*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+	int		(*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
+
+	struct list_head fmt_list;
+} pfm_buffer_fmt_t;
+
+extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
+extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
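+
+/*
+ * Skeleton of a sampling format (hypothetical; a real format must also
+ * fill in fmt_uuid and the validate/getsize/init callbacks):
+ *
+ *	static int null_fmt_handler(struct task_struct *task, void *buf,
+ *				    pfm_ovfl_arg_t *arg, struct pt_regs *regs,
+ *				    unsigned long stamp)
+ *	{
+ *		arg->ovfl_ctrl.bits.notify_user = 0;
+ *		arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
+ *		return 0;
+ *	}
+ *
+ *	static pfm_buffer_fmt_t null_fmt = {
+ *		.fmt_name    = "null-fmt",
+ *		.fmt_handler = null_fmt_handler,
+ *	};
+ *
+ * and at module init: pfm_register_buffer_fmt(&null_fmt);
+ */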
+
+/*
+ * perfmon interface exported to modules
+ */
+extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+
+/*
+ * describes the content of the local_cpu_data->pfm_syst_info field
+ */
+#define PFM_CPUINFO_SYST_WIDE	0x1	/* if set a system wide session exists */
+#define PFM_CPUINFO_DCR_PP	0x2	/* if set the system wide session has started */
+#define PFM_CPUINFO_EXCL_IDLE	0x4	/* the system wide session excludes the idle task */
+
+/*
+ * sysctl control structure. visible to sampling formats
+ */
+typedef struct {
+	int	debug;		/* turn on/off debugging via syslog */
+	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
+	int	fastctxsw;	/* turn on/off fast (insecure) ctxsw */
+	int	expert_mode;	/* turn on/off value checking */
+} pfm_sysctl_t;
+extern pfm_sysctl_t pfm_sysctl;
+
+
+#endif /* _ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
new file mode 100644
index 0000000..f5e70e9
--- /dev/null
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -0,0 +1,125 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
+ */
+
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+#include <linux/quicklist.h>
+
+#include <asm/mmu_context.h>
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	quicklist_free(0, NULL, pgd);
+}
+
+#if CONFIG_PGTABLE_LEVELS == 4
+static inline void
+pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+	pgd_val(*pgd_entry) = __pa(pud);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	quicklist_free(0, NULL, pud);
+}
+#define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+
+static inline void
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+	pud_val(*pud_entry) = __pa(pmd);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	quicklist_free(0, NULL, pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
+{
+	pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
+{
+	pmd_val(*pmd_entry) = __pa(pte);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	struct page *page;
+	void *pg;
+
+	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+	if (!pg)
+		return NULL;
+	page = virt_to_page(pg);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(0, NULL, pg);
+		return NULL;
+	}
+	return page;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long addr)
+{
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+	pgtable_page_dtor(pte);
+	quicklist_free_page(0, NULL, pte);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	quicklist_free(0, NULL, pte);
+}
+
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(0, NULL, 25, 16);
+}
+
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
+
+#endif				/* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
new file mode 100644
index 0000000..9f3ed9e
--- /dev/null
+++ b/arch/ia64/include/asm/pgtable.h
@@ -0,0 +1,594 @@
+#ifndef _ASM_IA64_PGTABLE_H
+#define _ASM_IA64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the IA-64 page table tree.
+ *
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
+ * in <asm/page.h>.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/types.h>
+
+#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */
+
+/*
+ * First, define the various bits in a PTE.  Note that the PTE format
+ * matches the VHPT short format, the first doubleword of the VHPT long
+ * format, and the first doubleword of the TLB insertion format.
+ */
+#define _PAGE_P_BIT		0
+#define _PAGE_A_BIT		5
+#define _PAGE_D_BIT		6
+
+#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
+#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
+#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
+#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
+#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
+#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
+#define _PAGE_MA_MASK		(0x7 <<  2)
+#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
+#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
+#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
+#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
+#define _PAGE_PL_MASK		(3 <<  7)
+#define _PAGE_AR_R		(0 <<  9)	/* read only */
+#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
+#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
+#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
+#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
+#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
+#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
+#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
+#define _PAGE_AR_MASK		(7 <<  9)
+#define _PAGE_AR_SHIFT		9
+#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
+#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
+#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
+#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
+#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)
+
+#define _PFN_MASK		_PAGE_PPN_MASK
+/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
+#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
+
+#define _PAGE_SIZE_4K	12
+#define _PAGE_SIZE_8K	13
+#define _PAGE_SIZE_16K	14
+#define _PAGE_SIZE_64K	16
+#define _PAGE_SIZE_256K	18
+#define _PAGE_SIZE_1M	20
+#define _PAGE_SIZE_4M	22
+#define _PAGE_SIZE_16M	24
+#define _PAGE_SIZE_64M	26
+#define _PAGE_SIZE_256M	28
+#define _PAGE_SIZE_1G	30
+#define _PAGE_SIZE_4G	32
+
+#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
+#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
+#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
+
+/*
+ * How many pointers will a page table level hold expressed in shift
+ */
+#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)
+
+/*
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
+ *
+ * PMD_SHIFT determines the size of the area a third-level page table
+ * can map.
+ */
+#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))
+
+#if CONFIG_PGTABLE_LEVELS == 4
+/*
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#if CONFIG_PGTABLE_LEVELS == 4
+#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS	0UL
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time
+ * they are used, the page is accessed. They are cleared only by the
+ * page-out routines.
+ */
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
+#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
+#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
+				 _PAGE_MA_UC)
+
+# ifndef __ASSEMBLY__
+
+#include <linux/sched.h>	/* for mm_struct */
+#include <linux/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Next come the mappings that determine how mmap() protection bits
+ * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
+ * _P version gets used for a private shared memory segment, the _S
+ * version gets used for a shared memory segment with MAP_SHARED on.
+ * In a private shared memory segment, we do a copy-on-write if a task
+ * attempts to write to the page.
+ */
+	/* xwr */
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
+#define __P011	PAGE_READONLY	/* ditto */
+#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110	PAGE_COPY_EXEC
+#define __P111	PAGE_COPY_EXEC
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
+#define __S011	PAGE_SHARED
+#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+
+#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#if CONFIG_PGTABLE_LEVELS == 4
+#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page addresses:
+ */
+
+
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static inline long
+ia64_phys_addr_valid (unsigned long addr)
+{
+	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory.  For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error.  Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions).
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr)	(1)
+
+
+/*
+ * Now come the defines and routines to manage and access the three-level
+ * page table.
+ */
+
+
+#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+extern unsigned long VMALLOC_END;
+#else
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
+# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
+# define vmemmap		((struct page *)VMALLOC_END)
+#else
+# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
+#endif
+
+/* fs/proc/kcore.c */
+#define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
+
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
+
+/*
+ * Conversion functions: convert page frame number (pfn) and a protection value to a page
+ * table entry (pte).
+ */
+#define pfn_pte(pfn, pgprot) \
+({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
+
+/* Extract pfn from pte.  */
+#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
+
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+#define pte_modify(_pte, newprot) \
+	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
+
+#define pte_none(pte) 			(!pte_val(pte))
+#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
+/* pte_page() returns the "struct page *" corresponding to the PTE: */
+#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
+
+#define pmd_none(pmd)			(!pmd_val(pmd))
+#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
+#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
+#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
+#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
+
+#define pud_none(pud)			(!pud_val(pud))
+#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
+#define pud_present(pud)		(pud_val(pud) != 0UL)
+#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
+#define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))
+
+#if CONFIG_PGTABLE_LEVELS == 4
+#define pgd_none(pgd)			(!pgd_val(pgd))
+#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
+#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#endif
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
+#define pte_special(pte)	0
+
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
+ */
+#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
+#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
+#define pte_mkspecial(pte)	(pte)
+
+/*
+ * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
+ * sync the icache and dcache when we insert a *new* executable page.
+ * __ia64_sync_icache_dcache() checks the Pg_arch_1 bit and flushes the
+ * icache if necessary.
+ *
+ * set_pte() is also called by the kernel, but we can expect that the kernel
+ * flushes the icache explicitly if necessary.
+ */
+#define pte_present_exec_user(pte)\
+	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
+		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
+
+extern void __ia64_sync_icache_dcache(pte_t pteval);
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	/* page is present && page is user && page is executable
+	 * && (page swapin or new page or page migration
+	 *	|| copy_on_write with page copying.)
+	 */
+	if (pte_present_exec_user(pteval) &&
+	    (!pte_present(*ptep) ||
+		pte_pfn(*ptep) != pte_pfn(pteval)))
+		/* load_module() calls flush_icache_range() explicitly */
+		__ia64_sync_icache_dcache(pteval);
+	*ptep = pteval;
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * Make page protection values cacheable, uncacheable, or write-
+ * combining.  Note that "protection" is really a misnomer here as the
+ * protection value contains the memory attribute bits, dirty bits, and
+ * various other bits as well.
+ */
+#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
+#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+static inline unsigned long
+pgd_index (unsigned long address)
+{
+	unsigned long region = address >> 61;
+	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+	return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+
+/* The offset in the 1-level directory is given by the 3 region bits
+   (61..63) and the level-1 bits.  */
+static inline pgd_t*
+pgd_offset (const struct mm_struct *mm, unsigned long address)
+{
+	return mm->pgd + pgd_index(address);
+}
+
+/* In the kernel's mapped region we completely ignore the region number
+   (since we know it's in region number 5). */
+#define pgd_offset_k(addr) \
+	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
+   resides in the kernel-mapped segment, hence we use pgd_offset_k()
+   here.  */
+#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
+
+#if CONFIG_PGTABLE_LEVELS == 4
+/* Find an entry in the second-level page table. */
+#define pud_offset(dir,addr) \
+	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table. */
+#define pmd_offset(dir,addr) \
+	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/*
+ * Find an entry in the third-level page table.  This looks more complicated than it
+ * should be because some platforms place page tables in high memory.
+ */
+#define pte_index(addr)	 	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
+#define pte_unmap(pte)			do { } while (0)
+
+/* atomic versions of the some PTE manipulations: */
+
+static inline int
+ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	if (!pte_young(*ptep))
+		return 0;
+	return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+	pte_t pte = *ptep;
+	if (!pte_young(pte))
+		return 0;
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	return 1;
+#endif
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	return __pte(xchg((long *) ptep, 0));
+#else
+	pte_t pte = *ptep;
+	pte_clear(mm, addr, ptep);
+	return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	unsigned long new, old;
+
+	do {
+		old = pte_val(*ptep);
+		new = pte_val(pte_wrprotect(__pte (old)));
+	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+	pte_t old_pte = *ptep;
+	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+	return pte_val(a) == pte_val(b);
+}
+
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
+
+/*
+ * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
+ *	 bits in the swap-type field of the swap pte.  It would be nice to
+ *	 enforce that, but we can't easily include <linux/swap.h> here.
+ *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
+ *
+ * Format of swap pte:
+ *	bit   0   : present bit (must be zero)
+ *	bits  1- 7: swap-type
+ *	bits  8-62: swap offset
+ *	bit  63   : _PAGE_PROTNONE bit
+ */
+#define __swp_type(entry)		(((entry).val >> 1) & 0x7f)
+#define __swp_offset(entry)		(((entry).val << 1) >> 9)
+#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
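+
+/*
+ * Worked example of the encoding: type 2 at offset 0x100 puts the type in
+ * bits 1-7 and the offset in bits 8-62, and the macros round-trip exactly:
+ *
+ *	swp_entry_t entry = __swp_entry(2, 0x100);
+ *
+ *	entry.val == (2 << 1) | (0x100UL << 8) == 0x10004
+ *	__swp_type(entry) == 2, __swp_offset(entry) == 0x100
+ */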
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
+#endif
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on, when the value at PTEP did not.  The
+ * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races.  On SMP machines, there are only two
+ * cases where this is true:
+ *
+ *	(1) *PTEP has the PRESENT bit turned OFF
+ *	(2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed && __safely_writable) {				\
+		set_pte(__ptep, __entry);				\
+		flush_tlb_page(__vma, __addr);				\
+	}								\
+	__changed;							\
+})
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed) {						\
+		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
+		flush_tlb_page(__vma, __addr);				\
+	}								\
+	__changed;							\
+})
+#endif
+
+#  ifdef CONFIG_VIRTUAL_MEM_MAP
+  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+#   define __HAVE_ARCH_MEMMAP_INIT
+    extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+			     unsigned long start_pfn);
+#  endif /* CONFIG_VIRTUAL_MEM_MAP */
+# endif /* !__ASSEMBLY__ */
+
+/*
+ * Identity-mapped regions use a large page size.  We'll call such large pages
+ * "granules".  If you can think of a better name that's unambiguous, let me
+ * know...
+ */
+#if defined(CONFIG_IA64_GRANULE_64MB)
+# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
+#elif defined(CONFIG_IA64_GRANULE_16MB)
+# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
+#endif
+#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
+/*
+ * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
+ */
+#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
+#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()	do { } while (0)
+
+/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
+#define FIXADDR_USER_START	GATE_ADDR
+#ifdef HAVE_BUGGY_SEGREL
+# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
+#else
+# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+#endif
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
+
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+#include <asm-generic/pgtable.h>
+
+#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
new file mode 100644
index 0000000..ce53c50
--- /dev/null
+++ b/arch/ia64/include/asm/processor.h
@@ -0,0 +1,711 @@
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ *
+ * 11/24/98	S.Eranian	added ia64_set_iva()
+ * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
+ * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
+ */
+
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+#define ARCH_HAS_PREFETCH_SWITCH_STACK
+
+#define IA64_NUM_PHYS_STACK_REG	96
+#define IA64_NUM_DBG_REGS	8
+
+#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE is really a misnomer.  It is actually the maximum user
+ * space address (plus one).  On IA-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 8TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE       	DEFAULT_TASK_SIZE
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
+#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
+#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
+							   sync at ctx sw */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT	3
+#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT	6
+#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
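+
+/*
+ * Illustrative example (not part of the original header): the SHIFT/MASK
+ * pairs map small control values from generic code straight onto the flag
+ * bits.  A UAC control value of 2 becomes
+ *	(2 << IA64_THREAD_UAC_SHIFT) == 0x10 == IA64_THREAD_UAC_SIGBUS,
+ * which is exactly what SET_UNALIGN_CTL() below stores in thread.flags.
+ */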
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
+ */
+#define IA64_NSEC_PER_CYC_SHIFT	30
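+
+/*
+ * Illustrative sketch (not part of the original header) of how the scaled
+ * value is typically consumed; a multiply and shift replace a division on
+ * every cycles-to-nanoseconds conversion:
+ *
+ *	ns = (cycles * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
+ *
+ * For a 400 MHz ITC, nsec_per_cyc == (1000000000 << 30) / 400000000, i.e.
+ * 2.5 scaled by 2^30.
+ */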
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+#include <linux/atomic.h>
+#ifdef CONFIG_NUMA
+#include <asm/nodedata.h>
+#endif
+
+/*
+ * The processor status register (PSR), expressed as bitfields for more
+ * efficient access than the IA64_PSR_* mask constants from <asm/kregs.h>:
+ */
+struct ia64_psr {
+	__u64 reserved0 : 1;
+	__u64 be : 1;
+	__u64 up : 1;
+	__u64 ac : 1;
+	__u64 mfl : 1;
+	__u64 mfh : 1;
+	__u64 reserved1 : 7;
+	__u64 ic : 1;
+	__u64 i : 1;
+	__u64 pk : 1;
+	__u64 reserved2 : 1;
+	__u64 dt : 1;
+	__u64 dfl : 1;
+	__u64 dfh : 1;
+	__u64 sp : 1;
+	__u64 pp : 1;
+	__u64 di : 1;
+	__u64 si : 1;
+	__u64 db : 1;
+	__u64 lp : 1;
+	__u64 tb : 1;
+	__u64 rt : 1;
+	__u64 reserved3 : 4;
+	__u64 cpl : 2;
+	__u64 is : 1;
+	__u64 mc : 1;
+	__u64 it : 1;
+	__u64 id : 1;
+	__u64 da : 1;
+	__u64 dd : 1;
+	__u64 ss : 1;
+	__u64 ri : 2;
+	__u64 ed : 1;
+	__u64 bn : 1;
+	__u64 reserved4 : 19;
+};
+
+union ia64_isr {
+	__u64  val;
+	struct {
+		__u64 code : 16;
+		__u64 vector : 8;
+		__u64 reserved1 : 8;
+		__u64 x : 1;
+		__u64 w : 1;
+		__u64 r : 1;
+		__u64 na : 1;
+		__u64 sp : 1;
+		__u64 rs : 1;
+		__u64 ir : 1;
+		__u64 ni : 1;
+		__u64 so : 1;
+		__u64 ei : 2;
+		__u64 ed : 1;
+		__u64 reserved2 : 20;
+	};
+};
+
+union ia64_lid {
+	__u64 val;
+	struct {
+		__u64  rv  : 16;
+		__u64  eid : 8;
+		__u64  id  : 8;
+		__u64  ig  : 32;
+	};
+};
+
+union ia64_tpr {
+	__u64 val;
+	struct {
+		__u64 ig0 : 4;
+		__u64 mic : 4;
+		__u64 rsv : 8;
+		__u64 mmi : 1;
+		__u64 ig1 : 47;
+	};
+};
+
+union ia64_itir {
+	__u64 val;
+	struct {
+		__u64 rv3  :  2; /* 0-1 */
+		__u64 ps   :  6; /* 2-7 */
+		__u64 key  : 24; /* 8-31 */
+		__u64 rv4  : 32; /* 32-63 */
+	};
+};
+
+union  ia64_rr {
+	__u64 val;
+	struct {
+		__u64  ve	:  1;  /* enable hw walker */
+		__u64  reserved0:  1;  /* reserved */
+		__u64  ps	:  6;  /* log page size */
+		__u64  rid	: 24;  /* region id */
+		__u64  reserved1: 32;  /* reserved */
+	};
+};
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state.  Frequently used
+ * state comes earlier:
+ */
+struct cpuinfo_ia64 {
+	unsigned int softirq_pending;
+	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
+	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
+	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
+	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
+	unsigned long itc_freq;		/* frequency of ITC counter */
+	unsigned long proc_freq;	/* frequency of processor */
+	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
+	unsigned long ptce_base;
+	unsigned int ptce_count[2];
+	unsigned int ptce_stride[2];
+	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
+
+#ifdef CONFIG_SMP
+	unsigned long loops_per_jiffy;
+	int cpu;
+	unsigned int socket_id;	/* physical processor socket id */
+	unsigned short core_id;	/* core id */
+	unsigned short thread_id; /* thread id */
+	unsigned short num_log;	/* Total number of logical processors on
+				 * this socket that were successfully booted */
+	unsigned char cores_per_socket;	/* Cores per processor socket */
+	unsigned char threads_per_core;	/* Threads per core */
+#endif
+
+	/* CPUID-derived information: */
+	unsigned long ppn;
+	unsigned long features;
+	unsigned char number;
+	unsigned char revision;
+	unsigned char model;
+	unsigned char family;
+	unsigned char archrev;
+	char vendor[16];
+	char *model_name;
+
+#ifdef CONFIG_NUMA
+	struct ia64_node_data *node_data;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
+
+/*
+ * The "local" data variable.  It refers to the per-CPU data of the currently executing
+ * CPU, much like "current" points to the per-task data of the currently executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
+ */
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
+
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+#define SET_UNALIGN_CTL(task,value)								\
+({												\
+	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
+				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
+	0;											\
+})
+#define GET_UNALIGN_CTL(task,addr)								\
+({												\
+	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
+		 (int __user *) (addr));							\
+})
+
+#define SET_FPEMU_CTL(task,value)								\
+({												\
+	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
+			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
+	0;											\
+})
+#define GET_FPEMU_CTL(task,addr)								\
+({												\
+	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
+		 (int __user *) (addr));							\
+})
+
+struct thread_struct {
+	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
+	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+	__u8 on_ustack;			/* executing on user-stacks? */
+	__u8 pad[3];
+	__u64 ksp;			/* kernel stack pointer */
+	__u64 map_base;			/* base address for get_unmapped_area() */
+	__u64 rbs_bot;			/* the base address for the RBS */
+	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */
+
+#ifdef CONFIG_PERFMON
+	void *pfm_context;		     /* pointer to detailed PMU context */
+	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
+# define INIT_THREAD_PM		.pfm_context =		NULL,     \
+				.pfm_needs_checking =	0UL,
+#else
+# define INIT_THREAD_PM
+#endif
+	unsigned long dbr[IA64_NUM_DBG_REGS];
+	unsigned long ibr[IA64_NUM_DBG_REGS];
+	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
+};
+
+#define INIT_THREAD {						\
+	.flags =	0,					\
+	.on_ustack =	0,					\
+	.ksp =		0,					\
+	.map_base =	DEFAULT_MAP_BASE,			\
+	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
+	.last_fph_cpu =  -1,					\
+	INIT_THREAD_PM						\
+	.dbr =		{0, },					\
+	.ibr =		{0, },					\
+	.fph =		{{{{0}}}, }				\
+}
+
+#define start_thread(regs,new_ip,new_sp) do {							\
+	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
+			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
+	regs->cr_iip = new_ip;									\
+	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
+	regs->ar_rnat = 0;									\
+	regs->ar_bspstore = current->thread.rbs_bot;						\
+	regs->ar_fpsr = FPSR_DEFAULT;								\
+	regs->loadrs = 0;									\
+	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
+	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
+	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {	\
+		/*										\
+		 * Zap scratch regs to avoid leaking bits between processes with different	\
+		 * uid/privileges.								\
+		 */										\
+		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
+		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
+	}											\
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait().
+ */
+#define release_thread(dead_task)
+
+/* Get wait channel for task P.  */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK.  */
+#define KSTK_EIP(tsk)					\
+  ({							\
+	struct pt_regs *_regs = task_pt_regs(tsk);	\
+	_regs->cr_iip + ia64_psr(_regs)->ri;		\
+  })
+
+/* Return stack pointer of blocked task TSK.  */
+#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
+
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum)					\
+({								\
+	unsigned long r = 0;					\
+								\
+	switch (regnum) {					\
+	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
+	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
+	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
+	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
+	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
+	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
+	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
+	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
+	    default: ia64_getreg_unknown_kr(); break;		\
+	}							\
+	r;							\
+})
+
+#define ia64_set_kr(regnum, r) 					\
+({								\
+	switch (regnum) {					\
+	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
+	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
+	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
+	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
+	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
+	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
+	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
+	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
+	    default: ia64_setreg_unknown_kr(); break;		\
+	}							\
+})
+
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
+
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_is_local_fpu_owner(t)								\
+({												\
+	struct task_struct *__ia64_islfo_task = (t);						\
+	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
+	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
+})
+
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_set_local_fpu_owner(t) do {						\
+	struct task_struct *__ia64_slfo_task = (t);					\
+	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
+	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs.  */
+#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
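+
+/*
+ * Illustrative sketch (not part of the original header) of the lazy fph
+ * protocol the three macros above implement, roughly what the disabled-fph
+ * fault handler does when a task touches f32-f127:
+ *
+ *	preempt_disable();
+ *	if (!ia64_is_local_fpu_owner(task)) {
+ *		if (task->thread.flags & IA64_THREAD_FPH_VALID)
+ *			ia64_load_fpu(task->thread.fph);
+ *		ia64_set_local_fpu_owner(task);
+ *	}
+ *	preempt_enable();
+ */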
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+	ia64_fph_enable();
+	__ia64_init_fpu();
+	ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+	ia64_fph_enable();
+	__ia64_save_fpu(fph);
+	ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+	ia64_fph_enable();
+	__ia64_load_fpu(fph);
+	ia64_fph_disable();
+}
+
+static inline __u64
+ia64_clear_ic (void)
+{
+	__u64 psr;
+	psr = ia64_getreg(_IA64_REG_PSR);
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+	ia64_srlz_i();
+	return psr;
+}
+
+/*
+ * Restore the psr.
+ */
+static inline void
+ia64_set_psr (__u64 psr)
+{
+	ia64_stop();
+	ia64_setreg(_IA64_REG_PSR_L, psr);
+	ia64_srlz_i();
+}
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+static inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+	  __u64 vmaddr, __u64 pte,
+	  __u64 log_page_size)
+{
+	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
+	if (target_mask & 0x1)
+		ia64_itri(tr_num, pte);
+	if (target_mask & 0x2)
+		ia64_itrd(tr_num, pte);
+}
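+
+/*
+ * Illustrative example (not part of the original header; "kernel_pte" is a
+ * placeholder): this is roughly how the kernel pins its own image with one
+ * translation register, mapping both the I- and D-side (target_mask 0x3):
+ *
+ *	ia64_itr(0x3, IA64_TR_KERNEL, KERNEL_START, kernel_pte,
+ *		 KERNEL_TR_PAGE_SHIFT);
+ */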
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+	  __u64 log_page_size)
+{
+	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
+	/* as per EAS2.6, itc must be the last instruction in an instruction group */
+	if (target_mask & 0x1)
+		ia64_itci(pte);
+	if (target_mask & 0x2)
+		ia64_itcd(pte);
+}
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+static inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+	if (target_mask & 0x1)
+		ia64_ptri(vmaddr, (log_size << 2));
+	if (target_mask & 0x2)
+		ia64_ptrd(vmaddr, (log_size << 2));
+}
+
+/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
+static inline void
+ia64_set_iva (void *ivt_addr)
+{
+	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+	ia64_srlz_i();
+}
+
+/* Set the page table address and control bits.  */
+static inline void
+ia64_set_pta (__u64 pta)
+{
+	/* Note: srlz.i implies srlz.d */
+	ia64_setreg(_IA64_REG_CR_PTA, pta);
+	ia64_srlz_i();
+}
+
+static inline void
+ia64_eoi (void)
+{
+	ia64_setreg(_IA64_REG_CR_EOI, 0);
+	ia64_srlz_d();
+}
+
+#define cpu_relax()	ia64_hint(ia64_hint_pause)
+#define cpu_relax_lowlatency() cpu_relax()
+
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+	unsigned int reg = vector / 64;
+	unsigned int bit = vector % 64;
+	u64 irr;
+
+	switch (reg) {
+	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+	default: irr = 0; break;	/* vector is expected to be 0..255 */
+	}
+
+	return test_bit(bit, &irr);
+}
+
+static inline void
+ia64_set_lrr0 (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_LRR0, val);
+	ia64_srlz_d();
+}
+
+static inline void
+ia64_set_lrr1 (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_LRR1, val);
+	ia64_srlz_d();
+}
+
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+static inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+	return ((__u64) spill_addr >> 3) & 0x3f;
+}
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR.  UNAT is the mask to be updated.
+ */
+static inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+	__u64 bit = ia64_unat_pos(spill_addr);
+	__u64 mask = 1UL << bit;
+
+	*unat = (*unat & ~mask) | (nat << bit);
+}
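+
+/*
+ * Illustrative example (not part of the original header): a register
+ * spilled to address 0xe050 occupies unat bit
+ *	((0xe050 >> 3) & 0x3f) == 10,
+ * so ia64_set_unat(&unat, (void *) 0xe050, 1) sets bit 10 of unat and
+ * ia64_set_unat(&unat, (void *) 0xe050, 0) clears it again.
+ */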
+
+/*
+ * Return saved PC of a blocked thread.
+ * Note that the only way T can block is through a call to schedule() -> switch_to().
+ */
+static inline unsigned long
+thread_saved_pc (struct task_struct *t)
+{
+	struct unw_frame_info info;
+	unsigned long ip;
+
+	unw_init_from_blocked_task(&info, t);
+	if (unw_unwind(&info) < 0)
+		return 0;
+	unw_get_ip(&info, &ip);
+	return ip;
+}
+
+/*
+ * Get the current instruction/program counter value.
+ */
+#define current_text_addr() \
+	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
+
+static inline __u64
+ia64_get_ivr (void)
+{
+	__u64 r;
+	ia64_srlz_d();
+	r = ia64_getreg(_IA64_REG_CR_IVR);
+	ia64_srlz_d();
+	return r;
+}
+
+static inline void
+ia64_set_dbr (__u64 regnum, __u64 value)
+{
+	__ia64_set_dbr(regnum, value);
+#ifdef CONFIG_ITANIUM
+	ia64_srlz_d();
+#endif
+}
+
+static inline __u64
+ia64_get_dbr (__u64 regnum)
+{
+	__u64 retval;
+
+	retval = __ia64_get_dbr(regnum);
+#ifdef CONFIG_ITANIUM
+	ia64_srlz_d();
+#endif
+	return retval;
+}
+
+static inline __u64
+ia64_rotr (__u64 w, __u64 n)
+{
+	return (w >> n) | (w << (64 - n));
+}
+
+#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
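+
+/*
+ * Illustrative example (not part of the original header):
+ *	ia64_rotr(0x1UL, 4) == 0x1000000000000000,
+ *	ia64_rotl(0x1UL, 4) == 0x10.
+ */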
+
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+	void *result;
+	result = (void *) ia64_tpa(addr);
+	return __va(result);
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define PREFETCH_STRIDE			L1_CACHE_BYTES
+
+static inline void
+prefetch (const void *x)
+{
+	 ia64_lfetch(ia64_lfhint_none, x);
+}
+
+static inline void
+prefetchw (const void *x)
+{
+	ia64_lfetch_excl(ia64_lfhint_none, x);
+}
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+
+extern unsigned long boot_option_idle_override;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
+
+void default_idle(void);
+
+#define ia64_platform_is(x) (strcmp(x, ia64_platform_name) == 0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
new file mode 100644
index 0000000..8451439
--- /dev/null
+++ b/arch/ia64/include/asm/ptrace.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ *	Suresh Siddha <suresh.b.siddha@intel.com>
+ *	Fenghua Yu <fenghua.yu@intel.com>
+ *	Arun Sharma <arun.sharma@intel.com>
+ *
+ * 12/07/98	S. Eranian	added pt_regs & switch_stack
+ * 12/21/98	D. Mosberger	updated to match latest code
+ *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
+ *
+ */
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER		3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER		2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER		1
+#else
+# define KERNEL_STACK_SIZE_ORDER		0
+#endif
+
+#define IA64_RBS_OFFSET			((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
+#define IA64_STK_OFFSET			((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE		IA64_STK_OFFSET
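+
+/*
+ * Illustrative example (not part of the original header): with the default
+ * 8KB pages, KERNEL_STACK_SIZE_ORDER is 2, so each task gets
+ * (1 << 2) * 8KB == 32KB holding the task_struct, thread_info, register
+ * backing store (growing up from IA64_RBS_OFFSET), and memory stack
+ * (growing down from IA64_STK_OFFSET).
+ */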
+
+#ifndef __ASSEMBLY__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * We use the ia64_psr(regs)->ri to determine which of the three
+ * instructions in bundle (16 bytes) took the sample. Generate
+ * the canonical representation by adding to instruction pointer.
+ */
+# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	/* FIXME: should this be bspstore + nr_dirty regs? */
+	return regs->ar_bspstore;
+}
+
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+	return regs->r10 != -1;
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	if (is_syscall_success(regs))
+		return regs->r8;
+	else
+		return -regs->r8;
+}
+
+/* Conserve space in histogram by encoding slot bits in address
+ * bits 2 and 3 rather than bits 0 and 1.
+ */
+#define profile_pc(regs)						\
+({									\
+	unsigned long __ip = instruction_pointer(regs);			\
+	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
+})
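+
+/*
+ * Illustrative example (not part of the original header): a sample in slot 2
+ * of the bundle at 0x4000000000001230 has
+ *	instruction_pointer() == 0x4000000000001232
+ *	profile_pc()          == 0x4000000000001230 + (2 << 2) == 0x4000000000001238
+ * so histogram buckets stay 4 bytes apart instead of colliding in bits 0-1.
+ */
+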
+/*
+ * Why not default?  Because user_stack_pointer() on ia64 gives register
+ * stack backing store instead...
+ */
+#define current_user_stack_pointer() (current_pt_regs()->r12)
+
+  /* given a pointer to a task_struct, return the user's pt_regs */
+# define task_pt_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
+# define fsys_mode(task,regs)					\
+  ({								\
+	  struct task_struct *_task = (task);			\
+	  struct pt_regs *_regs = (regs);			\
+	  !user_mode(_regs) && user_stack(_task, _regs);	\
+  })
+
+  /*
+   * System call handlers that, upon successful completion, need to return a negative value
+   * should call force_successful_syscall_return() right before returning.  On architectures
+   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+   * flag will not get set.  On architectures which do not support a separate error flag,
+   * the macro is a no-op and the spurious error condition needs to be filtered out by some
+   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+   * or something along those lines).
+   *
+   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+   */
+# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)
+
+  struct task_struct;			/* forward decl */
+  struct unw_frame_info;		/* forward decl */
+
+  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
+  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+					      unsigned long *);
+  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+			 unsigned long, long *);
+  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+			 unsigned long, long);
+  extern void ia64_flush_fph (struct task_struct *);
+  extern void ia64_sync_fph (struct task_struct *);
+  extern void ia64_sync_krbs(void);
+  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+				  unsigned long, unsigned long);
+
+  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
+  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
+  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
+  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
+
+  extern void ia64_increment_ip (struct pt_regs *pt);
+  extern void ia64_decrement_ip (struct pt_regs *pt);
+
+  extern void ia64_ptrace_stop(void);
+  #define arch_ptrace_stop(code, info) \
+	ia64_ptrace_stop()
+  #define arch_ptrace_stop_needed(code, info) \
+	(!test_thread_flag(TIF_RESTORE_RSE))
+
+  extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+  #define arch_ptrace_attach(child) \
+	ptrace_attach_sync_user_rbs(child)
+
+  #define arch_has_single_step()  (1)
+  #define arch_has_block_step()   (1)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_PTRACE_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
new file mode 100644
index 0000000..3027e75
--- /dev/null
+++ b/arch/ia64/include/asm/rwsem.h
@@ -0,0 +1,145 @@
+/*
+ * R/W semaphores for ia64
+ *
+ * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
+ * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
+ *
+ * Based on asm-i386/rwsem.h and other architecture implementation.
+ *
+ * The MSW of the count is the negated number of active writers and
+ * waiting lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer adds RWSEM_ACTIVE_WRITE_BIAS, it gets 0xffffffff00000001
+ * in the uncontended case.  Readers increment by 1 and see a positive value
+ * when uncontended, and a negative value if there are writers (and possibly
+ * readers) waiting, in which case the reader goes to sleep.
+ */
+
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#include <asm/intrinsics.h>
+
+#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS		(1L)
+#define RWSEM_ACTIVE_MASK		(0xffffffffL)
+#define RWSEM_WAITING_BIAS		(-0x100000000L)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
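+
+/*
+ * Illustrative example (not part of the original header): count values for
+ * some common states under this encoding:
+ *
+ *	0x0000000000000000	unlocked
+ *	0x0000000000000001	one reader active
+ *	0x0000000000000003	three readers active
+ *	0xffffffff00000001	one writer active or attempting (WAITING + ACTIVE)
+ *	0xffffffff00000002	one reader active, one writer waiting
+ */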
+
+/*
+ * lock for reading
+ */
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+
+	if (result < 0)
+		rwsem_down_read_failed(sem);
+}
+
+/*
+ * lock for writing
+ */
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+	long old, new;
+
+	do {
+		old = sem->count;
+		new = old + RWSEM_ACTIVE_WRITE_BIAS;
+	} while (cmpxchg_acq(&sem->count, old, new) != old);
+
+	if (old != 0)
+		rwsem_down_write_failed(sem);
+}
+
+/*
+ * unlock after reading
+ */
+static inline void
+__up_read (struct rw_semaphore *sem)
+{
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+
+	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
+		rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void
+__up_write (struct rw_semaphore *sem)
+{
+	long old, new;
+
+	do {
+		old = sem->count;
+		new = old - RWSEM_ACTIVE_WRITE_BIAS;
+	} while (cmpxchg_rel(&sem->count, old, new) != old);
+
+	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
+		rwsem_wake(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_read_trylock (struct rw_semaphore *sem)
+{
+	long tmp;
+	while ((tmp = sem->count) >= 0) {
+		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_write_trylock (struct rw_semaphore *sem)
+{
+	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+			      RWSEM_ACTIVE_WRITE_BIAS);
+	return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void
+__downgrade_write (struct rw_semaphore *sem)
+{
+	long old, new;
+
+	do {
+		old = sem->count;
+		new = old - RWSEM_WAITING_BIAS;
+	} while (cmpxchg_rel(&sem->count, old, new) != old);
+
+	if (old < 0)
+		rwsem_downgrade_wake(sem);
+}
+
+/*
+ * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
+ * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
+ */
+#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
+
+#endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
new file mode 100644
index 0000000..e504f38
--- /dev/null
+++ b/arch/ia64/include/asm/sal.h
@@ -0,0 +1,917 @@
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ *
+ * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
+ *		    revision of the SAL spec.
+ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
+ *                  revision of the SAL spec.
+ * 99/09/29 davidm	Updated for SAL 2.6.
+ * 00/03/29 cfleck      Updated SAL Error Logging info for processor (SAL 2.6)
+ *                      (plus examples of platform error info structures from smariset @ Intel)
+ */
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT		0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT	1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT	2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT	 	3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK	  (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT	  (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bcd.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+
+#include <asm/pal.h>
+#include <asm/fpu.h>
+
+extern spinlock_t sal_lock;
+
+/* SAL spec _requires_ eight args for each call. */
+#define __IA64_FW_CALL(entry,result,a0,a1,a2,a3,a4,a5,a6,a7)	\
+	result = (*entry)(a0,a1,a2,a3,a4,a5,a6,a7)
+
+# define IA64_FW_CALL(entry,result,args...) do {		\
+	unsigned long __ia64_sc_flags;				\
+	struct ia64_fpreg __ia64_sc_fr[6];			\
+	ia64_save_scratch_fpregs(__ia64_sc_fr);			\
+	spin_lock_irqsave(&sal_lock, __ia64_sc_flags);		\
+	__IA64_FW_CALL(entry, result, args);			\
+	spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);	\
+	ia64_load_scratch_fpregs(__ia64_sc_fr);			\
+} while (0)
+
+# define SAL_CALL(result,args...)			\
+	IA64_FW_CALL(ia64_sal, result, args);
+
+# define SAL_CALL_NOLOCK(result,args...) do {		\
+	unsigned long __ia64_scn_flags;			\
+	struct ia64_fpreg __ia64_scn_fr[6];		\
+	ia64_save_scratch_fpregs(__ia64_scn_fr);	\
+	local_irq_save(__ia64_scn_flags);		\
+	__IA64_FW_CALL(ia64_sal, result, args);		\
+	local_irq_restore(__ia64_scn_flags);		\
+	ia64_load_scratch_fpregs(__ia64_scn_fr);	\
+} while (0)
+
+# define SAL_CALL_REENTRANT(result,args...) do {	\
+	struct ia64_fpreg __ia64_scs_fr[6];		\
+	ia64_save_scratch_fpregs(__ia64_scs_fr);	\
+	preempt_disable();				\
+	__IA64_FW_CALL(ia64_sal, result, args);		\
+	preempt_enable();				\
+	ia64_load_scratch_fpregs(__ia64_scs_fr);	\
+} while (0)
+
+#define SAL_SET_VECTORS			0x01000000
+#define SAL_GET_STATE_INFO		0x01000001
+#define SAL_GET_STATE_INFO_SIZE		0x01000002
+#define SAL_CLEAR_STATE_INFO		0x01000003
+#define SAL_MC_RENDEZ			0x01000004
+#define SAL_MC_SET_PARAMS		0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR	0x01000006
+
+#define SAL_CACHE_FLUSH			0x01000008
+#define SAL_CACHE_INIT			0x01000009
+#define SAL_PCI_CONFIG_READ		0x01000010
+#define SAL_PCI_CONFIG_WRITE		0x01000011
+#define SAL_FREQ_BASE			0x01000012
+#define SAL_PHYSICAL_ID_INFO		0x01000013
+
+#define SAL_UPDATE_PAL			0x01000020
+
+struct ia64_sal_retval {
+	/*
+	 * A zero status value indicates call completed without error.
+	 * A negative status value indicates the reason the call failed.
+	 * A positive status value indicates success but an
+	 * informational value should be printed (e.g., "reboot for
+	 * change to take effect").
+	 */
+	long status;
+	unsigned long v0;
+	unsigned long v1;
+	unsigned long v2;
+};
+
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+	SAL_FREQ_BASE_PLATFORM = 0,
+	SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+	SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors.  The structure of these descriptors follows
+ * below.
+ * The definition follows the SAL spec from July 2000.
+ */
+struct ia64_sal_systab {
+	u8 signature[4];	/* should be "SST_" */
+	u32 size;		/* size of this table in bytes */
+	u8 sal_rev_minor;
+	u8 sal_rev_major;
+	u16 entry_count;	/* # of entries in variable portion */
+	u8 checksum;
+	u8 reserved1[7];
+	u8 sal_a_rev_minor;
+	u8 sal_a_rev_major;
+	u8 sal_b_rev_minor;
+	u8 sal_b_rev_major;
+	/* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+	u8 oem_id[32];
+	u8 product_id[32];	/* ASCII product id  */
+	u8 reserved2[8];
+};
+
+enum sal_systab_entry_type {
+	SAL_DESC_ENTRY_POINT = 0,
+	SAL_DESC_MEMORY = 1,
+	SAL_DESC_PLATFORM_FEATURE = 2,
+	SAL_DESC_TR = 3,
+	SAL_DESC_PTC = 4,
+	SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type:	Size:
+ *	0	48
+ *	1	32
+ *	2	16
+ *	3	32
+ *	4	16
+ *	5	16
+ */
+#define SAL_DESC_SIZE(type)	"\060\040\020\040\020\020"[(unsigned) type]
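+
+/*
+ * Illustrative note (not part of the original header): the string literal
+ * above encodes the size table in octal escapes, so indexing it yields the
+ * entry size in bytes directly:
+ *	SAL_DESC_SIZE(SAL_DESC_ENTRY_POINT) == '\060' == 48
+ *	SAL_DESC_SIZE(SAL_DESC_PTC)         == '\020' == 16
+ */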
+
+typedef struct ia64_sal_desc_entry_point {
+	u8 type;
+	u8 reserved1[7];
+	u64 pal_proc;
+	u64 sal_proc;
+	u64 gp;
+	u8 reserved2[16];
+}ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+	u8 type;
+	u8 used_by_sal;	/* needs to be mapped for SAL? */
+	u8 mem_attr;		/* current memory attribute setting */
+	u8 access_rights;	/* access rights set up by SAL */
+	u8 mem_attr_mask;	/* mask of supported memory attributes */
+	u8 reserved1;
+	u8 mem_type;		/* memory type */
+	u8 mem_usage;		/* memory usage */
+	u64 addr;		/* physical address of memory */
+	u32 length;	/* length (multiple of 4KB pages) */
+	u32 reserved2;
+	u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
+
+typedef struct ia64_sal_desc_platform_feature {
+	u8 type;
+	u8 feature_mask;
+	u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+	u8 type;
+	u8 tr_type;		/* 0 == instruction, 1 == data */
+	u8 regnum;		/* translation register number */
+	u8 reserved1[5];
+	u64 addr;		/* virtual address of area covered */
+	u64 page_size;		/* encoded page size */
+	u8 reserved2[8];
+} ia64_sal_desc_tr_t;
+
+typedef struct ia64_sal_desc_ptc {
+	u8 type;
+	u8 reserved1[3];
+	u32 num_domains;	/* # of coherence domains */
+	u64 domain_info;	/* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+	u64 proc_count;		/* number of processors in domain */
+	u64 proc_list;		/* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+	u64 id  : 8;		/* id of processor */
+	u64 eid : 8;		/* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
+
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+typedef struct ia64_sal_desc_ap_wakeup {
+	u8 type;
+	u8 mechanism;		/* 0 == external interrupt */
+	u8 reserved1[6];
+	u64 vector;		/* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t ;
+
+extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
+
+extern unsigned short sal_revision;	/* supported SAL spec revision */
+extern unsigned short sal_version;	/* SAL version; OEM dependent */
+#define SAL_VERSION_CODE(major, minor) ((bin2bcd(major) << 8) | bin2bcd(minor))
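+
+/*
+ * Illustrative example (not part of the original header):
+ *	SAL_VERSION_CODE(3, 2) == (bin2bcd(3) << 8) | bin2bcd(2) == 0x0302,
+ * the form compared against sal_revision in ia64_sal_physical_id_info()
+ * below.
+ */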
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+	SAL_INFO_TYPE_MCA  = 0,		/* Machine check abort information */
+        SAL_INFO_TYPE_INIT = 1,		/* Init information */
+        SAL_INFO_TYPE_CMC  = 2,		/* Corrected machine check information */
+        SAL_INFO_TYPE_CPE  = 3		/* Corrected platform error information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+	SAL_MC_PARAM_RENDEZ_INT    = 1,	/* Rendezvous interrupt */
+	SAL_MC_PARAM_RENDEZ_WAKEUP = 2,	/* Wakeup */
+	SAL_MC_PARAM_CPE_INT	   = 3	/* Corrected Platform Error Int */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+	SAL_MC_PARAM_MECHANISM_INT = 1,	/* Use interrupt */
+	SAL_MC_PARAM_MECHANISM_MEM = 2	/* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+	SAL_VECTOR_OS_MCA	  = 0,
+	SAL_VECTOR_OS_INIT	  = 1,
+	SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
+#define	SAL_MC_PARAM_RZ_ALWAYS		0x1
+#define	SAL_MC_PARAM_BINIT_ESCALATE	0x10
+
+/*
+ * Definition of the SAL Error Log from the SAL spec
+ */
+
+/* SAL Error Record Section GUID Definitions */
+#define SAL_PROC_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+    EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+		0xca, 0x4d)
+
+#define MAX_CACHE_ERRORS	6
+#define MAX_TLB_ERRORS		6
+#define MAX_BUS_ERRORS		1
+
+/* Definition of version  according to SAL spec for logging purposes */
+typedef struct sal_log_revision {
+	u8 minor;		/* BCD (0..99) */
+	u8 major;		/* BCD (0..99) */
+} sal_log_revision_t;
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+typedef struct sal_log_timestamp {
+	u8 slh_second;		/* Second (0..59) */
+	u8 slh_minute;		/* Minute (0..59) */
+	u8 slh_hour;		/* Hour (0..23) */
+	u8 slh_reserved;
+	u8 slh_day;		/* Day (1..31) */
+	u8 slh_month;		/* Month (1..12) */
+	u8 slh_year;		/* Year (00..99) */
+	u8 slh_century;		/* Century (19, 20, 21, ...) */
+} sal_log_timestamp_t;
+
+/* Definition of log record  header structures */
+typedef struct sal_log_record_header {
+	u64 id;				/* Unique monotonically increasing ID */
+	sal_log_revision_t revision;	/* Major and Minor revision of header */
+	u8 severity;			/* Error Severity */
+	u8 validation_bits;		/* 0: platform_guid, 1: !timestamp */
+	u32 len;			/* Length of this error log in bytes */
+	sal_log_timestamp_t timestamp;	/* Timestamp */
+	efi_guid_t platform_guid;	/* Unique OEM Platform ID */
+} sal_log_record_header_t;
+
+#define sal_log_severity_recoverable	0
+#define sal_log_severity_fatal		1
+#define sal_log_severity_corrected	2
+
+/*
+ * Error Recovery Info (ERI) bit decode.  From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID		0x0	/* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE	0x30	/* Resource not accessible */
+#define ERI_CONTAINMENT_WARN	0x22	/* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR	0x20	/* Uncorrected error */
+#define ERI_COMPONENT_RESET	0x24	/* Component must be reset */
+#define ERI_CORR_ERROR_LOG	0x21	/* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH	0x29	/* Corrected error threshold exceeded */
+
+/* Definition of log section header structures */
+typedef struct sal_log_sec_header {
+    efi_guid_t guid;			/* Unique Section ID */
+    sal_log_revision_t revision;	/* Major and Minor revision of Section */
+    u8 error_recovery_info;		/* Platform error recovery status */
+    u8 reserved;
+    u32 len;				/* Section length */
+} sal_log_section_hdr_t;
+
+typedef struct sal_log_mod_error_info {
+	struct {
+		u64 check_info              : 1,
+		    requestor_identifier    : 1,
+		    responder_identifier    : 1,
+		    target_identifier       : 1,
+		    precise_ip              : 1,
+		    reserved                : 59;
+	} valid;
+	u64 check_info;
+	u64 requestor_identifier;
+	u64 responder_identifier;
+	u64 target_identifier;
+	u64 precise_ip;
+} sal_log_mod_error_info_t;
+
+typedef struct sal_processor_static_info {
+	struct {
+		u64 minstate        : 1,
+		    br              : 1,
+		    cr              : 1,
+		    ar              : 1,
+		    rr              : 1,
+		    fr              : 1,
+		    reserved        : 58;
+	} valid;
+	pal_min_state_area_t min_state_area;
+	u64 br[8];
+	u64 cr[128];
+	u64 ar[128];
+	u64 rr[8];
+	struct ia64_fpreg __attribute__ ((packed)) fr[128];
+} sal_processor_static_info_t;
+
+struct sal_cpuid_info {
+	u64 regs[5];
+	u64 reserved;
+};
+
+typedef struct sal_log_processor_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 proc_error_map      : 1,
+		    proc_state_param    : 1,
+		    proc_cr_lid         : 1,
+		    psi_static_struct   : 1,
+		    num_cache_check     : 4,
+		    num_tlb_check       : 4,
+		    num_bus_check       : 4,
+		    num_reg_file_check  : 4,
+		    num_ms_check        : 4,
+		    cpuid_info          : 1,
+		    reserved1           : 39;
+	} valid;
+	u64 proc_error_map;
+	u64 proc_state_parameter;
+	u64 proc_cr_lid;
+	/*
+	 * The rest of this structure consists of variable-length arrays, which can't be
+	 * expressed in C.
+	 */
+	sal_log_mod_error_info_t info[0];
+	/*
+	 * This is what the rest looked like if C supported variable-length arrays:
+	 *
+	 * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
+	 * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
+	 * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
+	 * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
+	 * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
+	 * struct sal_cpuid_info cpuid_info;
+	 * sal_processor_static_info_t processor_static_info;
+	 */
+} sal_log_processor_info_t;
+
+/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
+#define SAL_LPI_PSI_INFO(l)									\
+({	sal_log_processor_info_t *_l = (l);							\
+	((sal_processor_static_info_t *)							\
+	 ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check		\
+				+ _l->valid.num_bus_check + _l->valid.num_reg_file_check	\
+				+ _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t)	\
+			       + sizeof(struct sal_cpuid_info))));				\
+})
+
+/* platform error log structures */
+
+typedef struct sal_log_mem_dev_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 error_status    : 1,
+		    physical_addr   : 1,
+		    addr_mask       : 1,
+		    node            : 1,
+		    card            : 1,
+		    module          : 1,
+		    bank            : 1,
+		    device          : 1,
+		    row             : 1,
+		    column          : 1,
+		    bit_position    : 1,
+		    requestor_id    : 1,
+		    responder_id    : 1,
+		    target_id       : 1,
+		    bus_spec_data   : 1,
+		    oem_id          : 1,
+		    oem_data        : 1,
+		    reserved        : 47;
+	} valid;
+	u64 error_status;
+	u64 physical_addr;
+	u64 addr_mask;
+	u16 node;
+	u16 card;
+	u16 module;
+	u16 bank;
+	u16 device;
+	u16 row;
+	u16 column;
+	u16 bit_position;
+	u64 requestor_id;
+	u64 responder_id;
+	u64 target_id;
+	u64 bus_spec_data;
+	u8 oem_id[16];
+	u8 oem_data[1];			/* Variable length data */
+} sal_log_mem_dev_err_info_t;
+
+typedef struct sal_log_sel_dev_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 record_id       : 1,
+		    record_type     : 1,
+		    generator_id    : 1,
+		    evm_rev         : 1,
+		    sensor_type     : 1,
+		    sensor_num      : 1,
+		    event_dir       : 1,
+		    event_data1     : 1,
+		    event_data2     : 1,
+		    event_data3     : 1,
+		    reserved        : 54;
+	} valid;
+	u16 record_id;
+	u8 record_type;
+	u8 timestamp[4];
+	u16 generator_id;
+	u8 evm_rev;
+	u8 sensor_type;
+	u8 sensor_num;
+	u8 event_dir;
+	u8 event_data1;
+	u8 event_data2;
+	u8 event_data3;
+} sal_log_sel_dev_err_info_t;
+
+typedef struct sal_log_pci_bus_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 err_status      : 1,
+		    err_type        : 1,
+		    bus_id          : 1,
+		    bus_address     : 1,
+		    bus_data        : 1,
+		    bus_cmd         : 1,
+		    requestor_id    : 1,
+		    responder_id    : 1,
+		    target_id       : 1,
+		    oem_data        : 1,
+		    reserved        : 54;
+	} valid;
+	u64 err_status;
+	u16 err_type;
+	u16 bus_id;
+	u32 reserved;
+	u64 bus_address;
+	u64 bus_data;
+	u64 bus_cmd;
+	u64 requestor_id;
+	u64 responder_id;
+	u64 target_id;
+	u8 oem_data[1];			/* Variable length data */
+} sal_log_pci_bus_err_info_t;
+
+typedef struct sal_log_smbios_dev_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 event_type      : 1,
+		    length          : 1,
+		    time_stamp      : 1,
+		    data            : 1,
+		    reserved1       : 60;
+	} valid;
+	u8 event_type;
+	u8 length;
+	u8 time_stamp[6];
+	u8 data[1];			/* data of variable length, length == slsmb_length */
+} sal_log_smbios_dev_err_info_t;
+
+typedef struct sal_log_pci_comp_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 err_status      : 1,
+		    comp_info       : 1,
+		    num_mem_regs    : 1,
+		    num_io_regs     : 1,
+		    reg_data_pairs  : 1,
+		    oem_data        : 1,
+		    reserved        : 58;
+	} valid;
+	u64 err_status;
+	struct {
+		u16 vendor_id;
+		u16 device_id;
+		u8 class_code[3];
+		u8 func_num;
+		u8 dev_num;
+		u8 bus_num;
+		u8 seg_num;
+		u8 reserved[5];
+	} comp_info;
+	u32 num_mem_regs;
+	u32 num_io_regs;
+	u64 reg_data_pairs[1];
+	/*
+	 * The array of address/data register pairs is num_mem_regs + num_io_regs
+	 * elements long.  Each array element consists of a u64 address followed
+	 * by a u64 data value.  The oem_data array immediately follows the
+	 * reg_data_pairs array.
+	 */
+	u8 oem_data[1];			/* Variable length data */
+} sal_log_pci_comp_err_info_t;
+
+typedef struct sal_log_plat_specific_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 err_status      : 1,
+		    guid            : 1,
+		    oem_data        : 1,
+		    reserved        : 61;
+	} valid;
+	u64 err_status;
+	efi_guid_t guid;
+	u8 oem_data[1];			/* platform specific variable length data */
+} sal_log_plat_specific_err_info_t;
+
+typedef struct sal_log_host_ctlr_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 err_status      : 1,
+		    requestor_id    : 1,
+		    responder_id    : 1,
+		    target_id       : 1,
+		    bus_spec_data   : 1,
+		    oem_data        : 1,
+		    reserved        : 58;
+	} valid;
+	u64 err_status;
+	u64 requestor_id;
+	u64 responder_id;
+	u64 target_id;
+	u64 bus_spec_data;
+	u8 oem_data[1];			/* Variable length OEM data */
+} sal_log_host_ctlr_err_info_t;
+
+typedef struct sal_log_plat_bus_err_info {
+	sal_log_section_hdr_t header;
+	struct {
+		u64 err_status      : 1,
+		    requestor_id    : 1,
+		    responder_id    : 1,
+		    target_id       : 1,
+		    bus_spec_data   : 1,
+		    oem_data        : 1,
+		    reserved        : 58;
+	} valid;
+	u64 err_status;
+	u64 requestor_id;
+	u64 responder_id;
+	u64 target_id;
+	u64 bus_spec_data;
+	u8 oem_data[1];			/* Variable length OEM data */
+} sal_log_plat_bus_err_info_t;
+
+/* Overall platform error section structure */
+typedef union sal_log_platform_err_info {
+	sal_log_mem_dev_err_info_t mem_dev_err;
+	sal_log_sel_dev_err_info_t sel_dev_err;
+	sal_log_pci_bus_err_info_t pci_bus_err;
+	sal_log_smbios_dev_err_info_t smbios_dev_err;
+	sal_log_pci_comp_err_info_t pci_comp_err;
+	sal_log_plat_specific_err_info_t plat_specific_err;
+	sal_log_host_ctlr_err_info_t host_ctlr_err;
+	sal_log_plat_bus_err_info_t plat_bus_err;
+} sal_log_platform_err_info_t;
+
+/* SAL log over-all, multi-section error record structure (processor+platform) */
+typedef struct err_rec {
+	sal_log_record_header_t sal_elog_header;
+	sal_log_processor_info_t proc_err;
+	sal_log_platform_err_info_t plat_err;
+	u8 oem_data_pad[1024];
+} ia64_err_rec_t;
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+
+extern s64 ia64_sal_cache_flush (u64 cache_type);
+extern void __init check_sal_cache_flush (void);
+
+/* Initialize all the processor and platform level instruction and data caches */
+static inline s64
+ia64_sal_cache_init (void)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
+	return isrv.status;
+}
+
+/*
+ * Clear the processor and platform information logged by SAL with respect to the machine
+ * state at the time of MCA's, INITs, CMCs, or CPEs.
+ */
+static inline s64
+ia64_sal_clear_state_info (u64 sal_info_type)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+	              0, 0, 0, 0, 0);
+	return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+	              sal_info, 0, 0, 0, 0);
+	if (isrv.status)
+		return 0;
+
+	return isrv.v0;
+}
+
+/*
+ * Get the maximum size of the information logged by SAL with respect to the machine state
+ * at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+	              0, 0, 0, 0, 0);
+	if (isrv.status)
+		return 0;
+	return isrv.v0;
+}
+
+/*
+ * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
+ * the monarch processor.  Must not lock, because it will not return on any cpu until the
+ * monarch processor sends a wake up.
+ */
+static inline s64
+ia64_sal_mc_rendez (void)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
+	return isrv.status;
+}
+
+/*
+ * Allow the OS to specify the interrupt number to be used by SAL to interrupt the OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ * Returns the complete ia64_sal_retval because some calls return more than just a status
+ * value.
+ */
+static inline struct ia64_sal_retval
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
+		 timeout, rz_always, 0, 0);
+	return isrv;
+}
+
+/* Read from PCI configuration space */
+static inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
+	if (value)
+		*value = isrv.v0;
+	return isrv.status;
+}
+
+/* Write to PCI configuration space */
+static inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+	         type, 0, 0, 0);
+	return isrv.status;
+}
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL procedures are invoked
+ * in virtual mode.
+ */
+static inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+	         0, 0, 0, 0, 0);
+	return isrv.status;
+}
+
+/*
+ * Register software dependent code locations within SAL. These locations are handlers or
+ * entry points where SAL will pass control for the specified event. These event handlers
+ * are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+static inline s64
+ia64_sal_set_vectors (u64 vector_type,
+		      u64 handler_addr1, u64 gp1, u64 handler_len1,
+		      u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+			handler_addr1, gp1, handler_len1,
+			handler_addr2, gp2, handler_len2);
+
+	return isrv.status;
+}
+
+/* Update the contents of the PAL block in the non-volatile storage device */
+static inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+		     u64 *error_code, u64 *scratch_buf_size_needed)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+	         0, 0, 0, 0);
+	if (error_code)
+		*error_code = isrv.v0;
+	if (scratch_buf_size_needed)
+		*scratch_buf_size_needed = isrv.v1;
+	return isrv.status;
+}
+
+/* Get physical processor die mapping in the platform. */
+static inline s64
+ia64_sal_physical_id_info(u16 *splid)
+{
+	struct ia64_sal_retval isrv;
+
+	if (sal_revision < SAL_VERSION_CODE(3,2))
+		return -1;
+
+	SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
+	if (splid)
+		*splid = isrv.v0;
+	return isrv.status;
+}
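+
+/*
+ * Illustrative sketch, not part of the original file: the usual pattern
+ * for the call above.  The decode of splid is defined by the SAL spec,
+ * so this only shows the status handling; the example_ name is
+ * hypothetical.
+ */
+static inline int
+example_have_die_mapping(void)
+{
+	u16 splid;
+
+	/* Fails with -1 on pre-3.2 SAL revisions (checked internally). */
+	return ia64_sal_physical_id_info(&splid) == 0;
+}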
+
+extern unsigned long sal_platform_features;
+
+extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
+
+struct sal_ret_values {
+	long r8; long r9; long r10; long r11;
+};
+
+#define IA64_SAL_OEMFUNC_MIN		0x02000000
+#define IA64_SAL_OEMFUNC_MAX		0x03ffffff
+
+extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
+			    u64, u64, u64);
+extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
+				   u64, u64, u64, u64, u64);
+extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
+				      u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+		    unsigned long *drift_info);
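+
+/*
+ * Illustrative sketch, not part of the original file: querying the
+ * platform base clock rate.  SAL_FREQ_BASE_PLATFORM is defined earlier
+ * in this header; the example_ name is hypothetical.
+ */
+static inline long
+example_platform_clock_hz(unsigned long *hz)
+{
+	unsigned long drift;
+
+	return ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, hz, &drift);
+}
+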
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+	u64 rr[8];		/* Region Registers */
+	u64 br[6];		/* br0:
+				 * return addr into SAL boot rendez routine */
+	u64 gr1;		/* SAL:GP */
+	u64 gr12;		/* SAL:SP */
+	u64 gr13;		/* SAL: Task Pointer */
+	u64 fpsr;
+	u64 pfs;
+	u64 rnat;
+	u64 unat;
+	u64 bspstore;
+	u64 dcr;		/* Default Control Register */
+	u64 iva;
+	u64 pta;
+	u64 itv;
+	u64 pmv;
+	u64 cmcv;
+	u64 lrr[2];
+	u64 gr[4];
+	u64 pr;			/* Predicate registers */
+	u64 lc;			/* Loop Count */
+	struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
+
+extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+
+#define PALO_MAX_TLB_PURGES	0xFFFF
+#define PALO_SIG	"PALO"
+
+struct palo_table {
+	u8  signature[4];	/* Should be "PALO" */
+	u32 length;
+	u8  minor_revision;
+	u8  major_revision;
+	u8  checksum;
+	u8  reserved1[5];
+	u16 max_tlb_purges;
+	u8  reserved2[6];
+};
+
+#define NPTCG_FROM_PAL			0
+#define NPTCG_FROM_PALO			1
+#define NPTCG_FROM_KERNEL_PARAMETER	2
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SAL_H */
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
new file mode 100644
index 0000000..2ab2003
--- /dev/null
+++ b/arch/ia64/include/asm/sections.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_SECTIONS_H
+#define _ASM_IA64_SECTIONS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/elf.h>
+#include <linux/uaccess.h>
+#include <asm-generic/sections.h>
+
+extern char __phys_per_cpu_start[];
+#ifdef	CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
+extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___rse_patchlist[], __end___rse_patchlist[];
+extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
+extern char __start_gate_section[];
+extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
+extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
+extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
+extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
+extern char __start_unwind[], __end_unwind[];
+extern char __start_ivt_text[], __end_ivt_text[];
+
+#undef dereference_function_descriptor
+static inline void *dereference_function_descriptor(void *ptr)
+{
+	struct fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->ip, p))
+		ptr = p;
+	return ptr;
+}
+
+
+#endif /* _ASM_IA64_SECTIONS_H */
+
diff --git a/arch/ia64/include/asm/segment.h b/arch/ia64/include/asm/segment.h
new file mode 100644
index 0000000..b89e2b3
--- /dev/null
+++ b/arch/ia64/include/asm/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_SEGMENT_H
+#define _ASM_IA64_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/arch/ia64/include/asm/serial.h b/arch/ia64/include/asm/serial.h
new file mode 100644
index 0000000..068be11
--- /dev/null
+++ b/arch/ia64/include/asm/serial.h
@@ -0,0 +1,17 @@
+/*
+ * Derived from the i386 version.
+ */
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/*
+ * All legacy serial ports should be enumerated via ACPI namespace, so
+ * we need not list them here.
+ */
diff --git a/arch/ia64/include/asm/shmparam.h b/arch/ia64/include/asm/shmparam.h
new file mode 100644
index 0000000..d07508d
--- /dev/null
+++ b/arch/ia64/include/asm/shmparam.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_SHMPARAM_H
+#define _ASM_IA64_SHMPARAM_H
+
+/*
+ * SHMLBA controls minimum alignment at which shared memory segments
+ * get attached.  The IA-64 architecture says that there may be a
+ * performance degradation when there are virtual aliases within 1MB.
+ * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
+ */
+#define	SHMLBA	(1024*1024)
+
+#endif /* _ASM_IA64_SHMPARAM_H */
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
new file mode 100644
index 0000000..6f2e2dd
--- /dev/null
+++ b/arch/ia64/include/asm/siginfo.h
@@ -0,0 +1,23 @@
+/*
+ * Based on <asm-i386/siginfo.h>.
+ *
+ * Modified 1998-2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_SIGINFO_H
+#define _ASM_IA64_SIGINFO_H
+
+#include <linux/string.h>
+#include <uapi/asm/siginfo.h>
+
+static inline void
+copy_siginfo (siginfo_t *to, siginfo_t *from)
+{
+	if (from->si_code < 0)
+		memcpy(to, from, sizeof(siginfo_t));
+	else
+		/* _sigchld is currently the largest known union member */
+		memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
+}
+
+#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/include/asm/signal.h b/arch/ia64/include/asm/signal.h
new file mode 100644
index 0000000..c62afa4
--- /dev/null
+++ b/arch/ia64/include/asm/signal.h
@@ -0,0 +1,32 @@
+/*
+ * Modified 1998-2001, 2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x.  Hence the #ifdef __KERNEL__ ugliness.
+ */
+#ifndef _ASM_IA64_SIGNAL_H
+#define _ASM_IA64_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+
+#define _NSIG		64
+#define _NSIG_BPW	64
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+# ifndef __ASSEMBLY__
+
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#  include <asm/sigcontext.h>
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGNAL_H */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
new file mode 100644
index 0000000..fea21e9
--- /dev/null
+++ b/arch/ia64/include/asm/smp.h
@@ -0,0 +1,137 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/irqreturn.h>
+
+#include <asm/io.h>
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+static inline unsigned int
+ia64_get_lid (void)
+{
+	union {
+		struct {
+			unsigned long reserved : 16;
+			unsigned long eid : 8;
+			unsigned long id : 8;
+			unsigned long ignored : 32;
+		} f;
+		unsigned long bits;
+	} lid;
+
+	lid.bits = ia64_getreg(_IA64_REG_CR_LID);
+	return lid.f.id << 8 | lid.f.eid;
+}
+
+#define hard_smp_processor_id()		ia64_get_lid()
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET		0x1e0008
+
+#define SMP_IRQ_REDIRECTION	(1 << 0)
+#define SMP_IPI_REDIRECTION	(1 << 1)
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+extern struct smp_boot_data {
+	int cpu_count;
+	int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
+
+extern char no_int_routing;
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+extern int smp_num_siblings;
+extern void __iomem *ipi_base_addr;
+extern unsigned char smp_int_redirect;
+
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * Function to map hard smp processor id to logical id.  Slow, so don't use this in
+ * performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; ++i)
+		if (cpu_physical_id(i) == cpuid)
+			break;
+	return i;
+}
+
+/*
+ * XTP control functions:
+ *	min_xtp   : route all interrupts to this CPU
+ *	normal_xtp: nominal XTP value
+ *	max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
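+
+/*
+ * Illustrative sketch, not part of the original file: bracketing a
+ * section during which this CPU should not receive redirectable
+ * interrupts.  The example_ name is hypothetical.
+ */
+static inline void
+example_xtp_quiesce(void)
+{
+	max_xtp();	/* steer redirectable interrupts elsewhere */
+	/* ... quiesced work would go here ... */
+	normal_xtp();	/* restore nominal interrupt delivery */
+}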
+
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern void __init smp_build_cpu_map(void);
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+extern irqreturn_t handle_IPI(int irq, void *dev_id);
+extern void smp_send_reschedule (int cpu);
+extern void identify_siblings (struct cpuinfo_ia64 *);
+extern int is_multithreading_enabled(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#else /* CONFIG_SMP */
+
+#define cpu_logical_id(i)		0
+#define cpu_physical_id(i)		ia64_get_lid()
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_SMP_H */
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
new file mode 100644
index 0000000..fd480db
--- /dev/null
+++ b/arch/ia64/include/asm/sn/acpi.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ACPI_H
+#define _ASM_IA64_SN_ACPI_H
+
+extern int sn_acpi_rev;
+#define SN_ACPI_BASE_SUPPORT()   (sn_acpi_rev >= 0x20101)
+
+#endif /* _ASM_IA64_SN_ACPI_H */
diff --git a/arch/ia64/include/asm/sn/addrs.h b/arch/ia64/include/asm/sn/addrs.h
new file mode 100644
index 0000000..e715c79
--- /dev/null
+++ b/arch/ia64/include/asm/sn/addrs.h
@@ -0,0 +1,299 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ADDRS_H
+#define _ASM_IA64_SN_ADDRS_H
+
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/pda.h>
+
+/*
+ *  Memory/SHUB Address Format:
+ *  +-+---------+--+--------------+
+ *  |0|  NASID  |AS| NodeOffset   |
+ *  +-+---------+--+--------------+
+ *
+ *  NASID: (low NASID bit is 0) Memory and SHUB MMRs
+ *   AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
+ *     00: Local Resources and MMR space
+ *           Top bit of NodeOffset
+ *               0: Local resources space
+ *                  node id:
+ *                        0: IA64/NT compatibility space
+ *                        2: Local MMR Space
+ *                        4: Local memory, regardless of local node id
+ *               1: Global MMR space
+ *     01: GET space.
+ *     10: AMO space.
+ *     11: Cacheable memory space.
+ *
+ *   NodeOffset: byte offset
+ *
+ *
+ *  TIO address format:
+ *  +-+----------+--+--------------+
+ *  |0|  NASID   |AS| Nodeoffset   |
+ *  +-+----------+--+--------------+
+ *
+ *  NASID: (low NASID bit is 1) TIO
+ *   AS: 2-bit Chiplet Identifier
+ *     00: TIO LB (Indicates TIO MMR access.)
+ *     01: TIO ICE (indicates coretalk space access.)
+ * 
+ *   NodeOffset: top bit must be set.
+ *
+ *
+ * Note that in both of the above address formats, the low
+ * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
+ */
+
+
+/*
+ * Define basic shift & mask constants for manipulating NASIDs and AS values.
+ */
+#define NASID_BITMASK		(sn_hub_info->nasid_bitmask)
+#define NASID_SHIFT		(sn_hub_info->nasid_shift)
+#define AS_SHIFT		(sn_hub_info->as_shift)
+#define AS_BITMASK		0x3UL
+
+#define NASID_MASK              ((u64)NASID_BITMASK << NASID_SHIFT)
+#define AS_MASK			((u64)AS_BITMASK << AS_SHIFT)
+
+
+/*
+ * AS values. These are the same on both SHUB1 & SHUB2.
+ */
+#define AS_GET_VAL		1UL
+#define AS_AMO_VAL		2UL
+#define AS_CAC_VAL		3UL
+#define AS_GET_SPACE		(AS_GET_VAL << AS_SHIFT)
+#define AS_AMO_SPACE		(AS_AMO_VAL << AS_SHIFT)
+#define AS_CAC_SPACE		(AS_CAC_VAL << AS_SHIFT)
+
+
+/* 
+ * Virtual Mode Local & Global MMR space.  
+ */
+#define SH1_LOCAL_MMR_OFFSET	0x8000000000UL
+#define SH2_LOCAL_MMR_OFFSET	0x0200000000UL
+#define LOCAL_MMR_OFFSET	(is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE		(__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
+
+#define SH1_GLOBAL_MMR_OFFSET	0x0800000000UL
+#define SH2_GLOBAL_MMR_OFFSET	0x0300000000UL
+#define GLOBAL_MMR_OFFSET	(is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE	(__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
+
+/*
+ * Physical mode addresses
+ */
+#define GLOBAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
+
+
+/*
+ * Clear region & AS bits.
+ */
+#define TO_PHYS_MASK		(~(RGN_BITS | AS_MASK))
+
+
+/*
+ * Misc NASID manipulation.
+ */
+#define NASID_SPACE(n)		((u64)(n) << NASID_SHIFT)
+#define REMOTE_ADDR(n,a)	(NASID_SPACE(n) | (a))
+#define NODE_OFFSET(x)		((x) & (NODE_ADDRSPACE_SIZE - 1))
+#define NODE_ADDRSPACE_SIZE     (1UL << AS_SHIFT)
+#define NASID_GET(x)		(int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
+#define LOCAL_MMR_ADDR(a)	(LOCAL_MMR_SPACE | (a))
+#define GLOBAL_MMR_ADDR(n,a)	(GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_CAC_ADDR(n,a)	(CAC_BASE | REMOTE_ADDR(n,a))
+#define CHANGE_NASID(n,x)	((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n)		((n) & 1)
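+
+/*
+ * Illustrative sketch, not part of the original file: composing a remote
+ * MMR address and recovering the NASID from it with the macros above.
+ * The example_ name is hypothetical.
+ */
+static inline int
+example_nasid_roundtrip(int nasid, u64 offset)
+{
+	u64 addr = GLOBAL_MMR_ADDR(nasid, offset);
+
+	/* NASID_GET() undoes the shift applied by NASID_SPACE(). */
+	return NASID_GET(addr) == nasid;
+}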
+
+
+/* non-II mmr's start at top of big window space (4G) */
+#define BWIN_TOP		0x0000000100000000UL
+
+/*
+ * general address defines
+ */
+#define CAC_BASE		(PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE		(__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE		(RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE		(PAGE_OFFSET | AS_GET_SPACE)
+
+/*
+ * Convert Memory addresses between various addressing modes.
+ */
+#define TO_PHYS(x)		(TO_PHYS_MASK & (x))
+#define TO_CAC(x)		(CAC_BASE     | TO_PHYS(x))
+#ifdef CONFIG_SGI_SN
+#define TO_AMO(x)		(AMO_BASE     | TO_PHYS(x))
+#define TO_GET(x)		(GET_BASE     | TO_PHYS(x))
+#else
+#define TO_AMO(x)		({ BUG(); x; })
+#define TO_GET(x)		({ BUG(); x; })
+#endif
+
+/*
+ * Convert from processor physical address to II/TIO physical address:
+ *	II - squeeze out the AS bits
+ *	TIO- requires a chiplet id in bits 38-39.  For DMA to memory,
+ *           the chiplet id is zero.  If we implement TIO-TIO dma, we might need
+ *           to insert a chiplet id into this macro.  However, it is our belief
+ *           right now that this chiplet id will be ICE, which is also zero.
+ */
+#define SH1_TIO_PHYS_TO_DMA(x) 						\
+	((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x) 					\
+        ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
+
+#define SH2_NETWORK_BANK_SELECT(x) 					\
+        ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4)))	\
+        	>> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x) 						\
+	(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x) 						\
+        (((u64)(NASID_GET(x)) << 40) | 	SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x)						\
+	(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x)							\
+	((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+
+
+/*
+ * Macros to test for address type.
+ */
+#define IS_AMO_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
+
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+#define BWIN_SIZE_BITS			29	/* big window size: 512M */
+#define TIO_BWIN_SIZE_BITS		30	/* big window size: 1G */
+#define NODE_SWIN_BASE(n, w)		((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
+		: RAW_NODE_SWIN_BASE(n, w))
+#define TIO_SWIN_BASE(n, w) 		(TIO_IO_BASE(n) + \
+					    ((u64) (w) << TIO_SWIN_SIZE_BITS))
+#define NODE_IO_BASE(n)			(GLOBAL_MMR_SPACE | NASID_SPACE(n))
+#define TIO_IO_BASE(n)                  (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
+#define BWIN_SIZE			(1UL << BWIN_SIZE_BITS)
+#define NODE_BWIN_BASE0(n)		(NODE_IO_BASE(n) + BWIN_SIZE)
+#define NODE_BWIN_BASE(n, w)		(NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
+#define RAW_NODE_SWIN_BASE(n, w)	(NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
+#define BWIN_WIDGET_MASK		0x7
+#define BWIN_WINDOWNUM(x)		(((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x)	((x) & BWIN_TOP)
+
+#define TIO_BWIN_WINDOW_SELECT_MASK	0x7
+#define TIO_BWIN_WINDOWNUM(x)		(((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
+
+#define TIO_HWIN_SHIFT_BITS		33
+#define TIO_HWIN(x)			(NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS			24
+#define	SWIN_WIDGET_MASK		0xF
+
+#define TIO_SWIN_SIZE_BITS		28
+#define TIO_SWIN_SIZE			(1UL << TIO_SWIN_SIZE_BITS)
+#define TIO_SWIN_WIDGET_MASK		0x3
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be physical or virtual address, but will be converted
+ * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
+ */
+#define	SWIN_WIDGETNUM(x)		(((x)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define TIO_SWIN_WIDGETNUM(x)		(((x)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers. The REMOTE_HUB_* macro produce
+ * the address for the specified hub's registers.  The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for x.
+ *
+ *   WARNING:
+ *	When certain Hub chip workarounds are defined, it's not sufficient
+ *	to dereference the *_HUB_ADDR() macros.  You should instead use
+ *	HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ *	Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ *	They're always safe.
+ */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x)					\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x)					\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x)					\
+	(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x)						\
+	(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) :		\
+	 	SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub2 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x)					\
+	((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x)						\
+	GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x)						\
+	((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) :		\
+		 SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x)						\
+	(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
+#define REMOTE_HUB_ADDR(n,x)						\
+	(IS_TIO_NASID(n) ?  ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) :	\
+	 ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
+
+#define HUB_L(x)			(*((volatile typeof(*x) *)x))
+#define	HUB_S(x,d)			(*((volatile typeof(*x) *)x) = (d))
+
+#define REMOTE_HUB_L(n, a)		HUB_L(REMOTE_HUB_ADDR((n), (a)))
+#define REMOTE_HUB_S(n, a, d)		HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
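+
+/*
+ * Illustrative sketch, not part of the original file: a read-modify-write
+ * of a remote hub register through the safe accessors above.  example_mmr
+ * is a hypothetical register offset.
+ */
+static inline void
+example_remote_hub_set_bits(nasid_t nasid, unsigned long example_mmr, u64 bits)
+{
+	u64 val = REMOTE_HUB_L(nasid, example_mmr);
+
+	REMOTE_HUB_S(nasid, example_mmr, val | bits);
+}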
+
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT		40
+#define CTALK_NASID_MASK		(0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT			38
+#define CTALK_CID_MASK			(0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET		0x3FFFFFFFFF
+
+#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
new file mode 100644
index 0000000..31eb784
--- /dev/null
+++ b/arch/ia64/include/asm/sn/arch.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_IA64_SN_ARCH_H
+#define _ASM_IA64_SN_ARCH_H
+
+#include <linux/numa.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn_cpuid.h>
+
+/*
+ * This is the maximum number of NUMALINK nodes that can be part of a single
+ * SSI kernel. This number includes C-bricks, M-bricks, and TIOs. Nodes in
+ * remote partitions are NOT included in this number.
+ * The number of compact nodes cannot exceed the size of a coherency domain.
+ * The purpose of this define is to specify a node count that includes
+ * all C/M/TIO nodes in an SSI system.
+ *
+ * SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
+ *
+ * 	Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
+ * 	to ACPI3.0, this limit will be removed. The notion of "compact nodes"
+ * 	should be deleted and TIOs should be included in MAX_NUMNODES.
+ */
+#define MAX_TIO_NODES		MAX_NUMNODES
+#define MAX_COMPACT_NODES	(MAX_NUMNODES + MAX_TIO_NODES)
+
+/*
+ * Maximum number of nodes in all partitions and in all coherency domains.
+ * This is the total number of nodes accessible in the numalink fabric. It
+ * includes all C & M bricks, plus all TIOs.
+ *
+ * This value is also the value of the maximum number of NASIDs in the numalink
+ * fabric.
+ */
+#define MAX_NUMALINK_NODES	16384
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced. They are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct sn_hub_info_s {
+	u8 shub2;
+	u8 nasid_shift;
+	u8 as_shift;
+	u8 shub_1_1_found;
+	u16 nasid_bitmask;
+};
+DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+#define sn_hub_info 	this_cpu_ptr(&__sn_hub_info)
+#define is_shub2()	(sn_hub_info->shub2)
+#define is_shub1()	(sn_hub_info->shub2 == 0)
+
+/*
+ * Use this macro to test if shub 1.1 wars should be enabled
+ */
+#define enable_shub_wars_1_1()	(sn_hub_info->shub_1_1_found)
+
+
+/*
+ * Compact node ID to nasid mappings kept in the per-cpu data areas of each
+ * cpu.
+ */
+DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+#define sn_cnodeid_to_nasid	this_cpu_ptr(&__sn_cnodeid_to_nasid[0])
+
+
+extern u8 sn_partition_id;
+extern u8 sn_system_size;
+extern u8 sn_sharing_domain_size;
+extern u8 sn_region_size;
+
+extern void sn_flush_all_caches(long addr, long bytes);
+extern bool sn_cpu_disable_allowed(int cpu);
+
+#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
new file mode 100644
index 0000000..cc6c4db
--- /dev/null
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -0,0 +1,234 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_BTE_H
+#define _ASM_IA64_SN_BTE_H
+
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/types.h>
+#include <asm/sn/shub_mmr.h>
+
+#define IBCT_NOTIFY             (0x1UL << 4)
+#define IBCT_ZFIL_MODE          (0x1UL << 0)
+
+/* #define BTE_DEBUG */
+/* #define BTE_DEBUG_VERBOSE */
+
+#ifdef BTE_DEBUG
+#  define BTE_PRINTK(x) printk x	/* Terse */
+#  ifdef BTE_DEBUG_VERBOSE
+#    define BTE_PRINTKV(x) printk x	/* Verbose */
+#  else
+#    define BTE_PRINTKV(x)
+#  endif /* BTE_DEBUG_VERBOSE */
+#else
+#  define BTE_PRINTK(x)
+#  define BTE_PRINTKV(x)
+#endif	/* BTE_DEBUG */
+
+
+/* BTE status register only supports 16 bits for length field */
+#define BTE_LEN_BITS (16)
+#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
+#define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT)
+
+
+/* Define hardware */
+#define BTES_PER_NODE (is_shub2() ? 4 : 2)
+#define MAX_BTES_PER_NODE 4
+
+#define BTE2OFF_CTRL	0
+#define BTE2OFF_SRC	(SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
+#define BTE2OFF_DEST	(SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
+#define BTE2OFF_NOTIFY	(SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
+
+#define BTE_BASE_ADDR(interface) 				\
+    (is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 :		\
+		  (interface == 1) ? SH2_BT_ENG_CSR_1 :		\
+		  (interface == 2) ? SH2_BT_ENG_CSR_2 :		\
+		  		     SH2_BT_ENG_CSR_3 		\
+		: (interface == 0) ? IIO_IBLS0 : IIO_IBLS1)
+
+#define BTE_SOURCE_ADDR(base)					\
+    (is_shub2() ? base + (BTE2OFF_SRC/8) 			\
+		: base + (BTEOFF_SRC/8))
+
+#define BTE_DEST_ADDR(base)					\
+    (is_shub2() ? base + (BTE2OFF_DEST/8) 			\
+		: base + (BTEOFF_DEST/8))
+
+#define BTE_CTRL_ADDR(base)					\
+    (is_shub2() ? base + (BTE2OFF_CTRL/8) 			\
+		: base + (BTEOFF_CTRL/8))
+
+#define BTE_NOTIF_ADDR(base)					\
+    (is_shub2() ? base + (BTE2OFF_NOTIFY/8) 			\
+		: base + (BTEOFF_NOTIFY/8))
+
+/* Define hardware modes */
+#define BTE_NOTIFY IBCT_NOTIFY
+#define BTE_NORMAL BTE_NOTIFY
+#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
+/* Use a reserved bit to let the caller specify a wait for any BTE */
+#define BTE_WACQUIRE 0x4000
+/* Use the BTE on the node with the destination memory */
+#define BTE_USE_DEST (BTE_WACQUIRE << 1)
+/* Use any available BTE interface on any node for the transfer */
+#define BTE_USE_ANY (BTE_USE_DEST << 1)
+/* macro to force the IBCT0 value valid */
+#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
+
+#define BTE_ACTIVE		(IBLS_BUSY | IBLS_ERROR)
+#define BTE_WORD_AVAILABLE	(IBLS_BUSY << 1)
+#define BTE_WORD_BUSY		(~BTE_WORD_AVAILABLE)
+
+/*
+ * Some macros to simplify reading.
+ * Start with macros to locate the BTE control registers.
+ */
+#define BTE_LNSTAT_LOAD(_bte)						\
+			HUB_L(_bte->bte_base_addr)
+#define BTE_LNSTAT_STORE(_bte, _x)					\
+			HUB_S(_bte->bte_base_addr, (_x))
+#define BTE_SRC_STORE(_bte, _x)						\
+({									\
+		u64 __addr = ((_x) & ~AS_MASK);				\
+		if (is_shub2()) 					\
+			__addr = SH2_TIO_PHYS_TO_DMA(__addr);		\
+		HUB_S(_bte->bte_source_addr, __addr);			\
+})
+#define BTE_DEST_STORE(_bte, _x)					\
+({									\
+		u64 __addr = ((_x) & ~AS_MASK);				\
+		if (is_shub2()) 					\
+			__addr = SH2_TIO_PHYS_TO_DMA(__addr);		\
+		HUB_S(_bte->bte_destination_addr, __addr);		\
+})
+#define BTE_CTRL_STORE(_bte, _x)					\
+			HUB_S(_bte->bte_control_addr, (_x))
+#define BTE_NOTIF_STORE(_bte, _x)					\
+({									\
+		u64 __addr = ia64_tpa((_x) & ~AS_MASK);			\
+		if (is_shub2()) 					\
+			__addr = SH2_TIO_PHYS_TO_DMA(__addr);		\
+		HUB_S(_bte->bte_notify_addr, __addr);			\
+})
+
+#define BTE_START_TRANSFER(_bte, _len, _mode)				\
+	is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
+		: BTE_LNSTAT_STORE(_bte, _len);				\
+		  BTE_CTRL_STORE(_bte, _mode)
+
+/* Possible results from bte_copy and bte_unaligned_copy */
+/* The following error codes map into the BTE hardware codes
+ * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
+ * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
+ * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
+ * codes to give the following error codes.
+ */
+#define BTEFAIL_OFFSET	1
+
+typedef enum {
+	BTE_SUCCESS,		/* 0 is success */
+	BTEFAIL_DIR,		/* Directory error due to IIO access*/
+	BTEFAIL_POISON,		/* poison error on IO access (write to poison page) */
+	BTEFAIL_WERR,		/* Write error (i.e., WINV to a read-only line) */
+	BTEFAIL_ACCESS,		/* access error (protection violation) */
+	BTEFAIL_PWERR,		/* Partial Write Error */
+	BTEFAIL_PRERR,		/* Partial Read Error */
+	BTEFAIL_TOUT,		/* CRB Time out */
+	BTEFAIL_XTERR,		/* Incoming xtalk pkt had error bit */
+	BTEFAIL_NOTAVAIL,	/* BTE not available */
+} bte_result_t;
+
+#define BTEFAIL_SH2_RESP_SHORT	0x1	/* bit 000001 */
+#define BTEFAIL_SH2_RESP_LONG	0x2	/* bit 000010 */
+#define BTEFAIL_SH2_RESP_DSP	0x4	/* bit 000100 */
+#define BTEFAIL_SH2_RESP_ACCESS	0x8	/* bit 001000 */
+#define BTEFAIL_SH2_CRB_TO	0x10	/* bit 010000 */
+#define BTEFAIL_SH2_NACK_LIMIT	0x20	/* bit 100000 */
+#define BTEFAIL_SH2_ALL		0x3F	/* bit 111111 */
+
+#define	BTE_ERR_BITS	0x3FUL
+#define	BTE_ERR_SHIFT	36
+#define BTE_ERR_MASK	(BTE_ERR_BITS << BTE_ERR_SHIFT)
+
+#define BTE_ERROR_RETRY(value)						\
+	(is_shub2() ? (value != BTEFAIL_SH2_CRB_TO)			\
+		: (value != BTEFAIL_TOUT))
+
+/*
+ * On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2()
+ */
+#define BTE_SHUB2_ERROR(_status)					\
+	((_status & BTE_ERR_MASK) 					\
+	   ? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \
+	   : _status)
+
+#define BTE_GET_ERROR_STATUS(_status)					\
+	(BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
+
+#define BTE_VALID_SH2_ERROR(value)					\
+	((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
+
+/*
+ * Structure defining a bte.  An instance of this
+ * structure is created in the nodepda for each
+ * bte on that node (as defined by BTES_PER_NODE)
+ * This structure contains everything necessary
+ * to work with a BTE.
+ */
+struct bteinfo_s {
+	volatile u64 notify ____cacheline_aligned;
+	u64 *bte_base_addr ____cacheline_aligned;
+	u64 *bte_source_addr;
+	u64 *bte_destination_addr;
+	u64 *bte_control_addr;
+	u64 *bte_notify_addr;
+	spinlock_t spinlock;
+	cnodeid_t bte_cnode;	/* cnode                            */
+	int bte_error_count;	/* Number of errors encountered     */
+	int bte_num;		/* 0 --> BTE0, 1 --> BTE1           */
+	int cleanup_active;	/* Interface is locked for cleanup  */
+	volatile bte_result_t bh_error;	/* error while processing   */
+	volatile u64 *most_rcnt_na;
+	struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE];
+};
+
+
+/*
+ * Function prototypes (functions defined in bte.c, used elsewhere)
+ */
+extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
+extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
+extern void bte_error_handler(unsigned long);
+
+#define bte_zero(dest, len, mode, notification) \
+	bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
+
+/*
+ * The following is the preferred way of calling bte_unaligned_copy.
+ * If the copy is fully cache-line aligned, then bte_copy is
+ * used instead.  Since bte_copy is inlined, this saves a function
+ * call.  NOTE: bte_copy is called synchronously and does block
+ * until the transfer is complete.  In order to get the asynch
+ * version of bte_copy, you must perform this check yourself.
+ */
+#define BTE_UNALIGNED_COPY(src, dest, len, mode)			\
+	(((len & (L1_CACHE_BYTES - 1)) ||				\
+	  (src & (L1_CACHE_BYTES - 1)) ||				\
+	  (dest & (L1_CACHE_BYTES - 1))) ?				\
+	 bte_unaligned_copy(src, dest, len, mode) :			\
+	 bte_copy(src, dest, len, mode, NULL))
+
+
+#endif	/* _ASM_IA64_SN_BTE_H */
diff --git a/arch/ia64/include/asm/sn/clksupport.h b/arch/ia64/include/asm/sn/clksupport.h
new file mode 100644
index 0000000..d340c36
--- /dev/null
+++ b/arch/ia64/include/asm/sn/clksupport.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * This file contains definitions for accessing a platform-supported high-resolution
+ * clock. The clock is monotonically increasing and can be accessed from any node
+ * in the system. The clock is synchronized across nodes - all nodes see the
+ * same value.
+ * 
+ *	RTC_COUNTER_ADDR - contains the address of the counter 
+ *
+ */
+
+#ifndef _ASM_IA64_SN_CLKSUPPORT_H
+#define _ASM_IA64_SN_CLKSUPPORT_H
+
+extern unsigned long sn_rtc_cycles_per_second;
+
+#define RTC_COUNTER_ADDR	((long *)LOCAL_MMR_ADDR(SH_RTC))
+
+#define rtc_time()		(*RTC_COUNTER_ADDR)
+
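+/*
+ * Illustrative sketch, not part of the original file: converting an RTC
+ * delta to microseconds using the exported cycle rate.  The example_
+ * name is hypothetical and the arithmetic ignores overflow for very
+ * large deltas.
+ */
+static inline unsigned long
+example_rtc_delta_to_usec(long start, long end)
+{
+	return ((unsigned long)(end - start) * 1000000UL) /
+		sn_rtc_cycles_per_second;
+}
+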
+#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
diff --git a/arch/ia64/include/asm/sn/geo.h b/arch/ia64/include/asm/sn/geo.h
new file mode 100644
index 0000000..f083c94
--- /dev/null
+++ b/arch/ia64/include/asm/sn/geo.h
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_GEO_H
+#define _ASM_IA64_SN_GEO_H
+
+/* The geoid_t implementation below is based loosely on the pcfg_t
+   implementation in sys/SN/promcfg.h. */
+
+/* Type declarations */
+
+/* Size of a geoid_t structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE	8	/* Would 16 be better?  The size can
+				   be different on different platforms. */
+
+#define MAX_SLOTS	0xf	/* slots per module */
+#define MAX_SLABS	0xf	/* slabs per slot */
+
+typedef unsigned char	geo_type_t;
+
+/* Fields common to all substructures */
+typedef struct geo_common_s {
+    moduleid_t	module;		/* The module (box) this h/w lives in */
+    geo_type_t	type;		/* What type of h/w is named by this geoid_t */
+    slabid_t	slab:4;		/* slab (ASIC), 0 .. 15 within slot */
+    slotid_t	slot:4;		/* slot (Blade), 0 .. 15 within module */
+} geo_common_t;
+
+/* Additional fields for particular types of hardware */
+typedef struct geo_node_s {
+    geo_common_t	common;		/* No additional fields needed */
+} geo_node_t;
+
+typedef struct geo_rtr_s {
+    geo_common_t	common;		/* No additional fields needed */
+} geo_rtr_t;
+
+typedef struct geo_iocntl_s {
+    geo_common_t	common;		/* No additional fields needed */
+} geo_iocntl_t;
+
+typedef struct geo_pcicard_s {
+    geo_iocntl_t	common;
+    char		bus;	/* Bus/widget number */
+    char		slot;	/* PCI slot number */
+} geo_pcicard_t;
+
+/* Subcomponents of a node */
+typedef struct geo_cpu_s {
+    geo_node_t	node;
+    char	slice;		/* Which CPU on the node */
+} geo_cpu_t;
+
+typedef struct geo_mem_s {
+    geo_node_t	node;
+    char	membus;		/* The memory bus on the node */
+    char	memslot;	/* The memory slot on the bus */
+} geo_mem_t;
+
+
+typedef union geoid_u {
+    geo_common_t	common;
+    geo_node_t		node;
+    geo_iocntl_t	iocntl;
+    geo_pcicard_t	pcicard;
+    geo_rtr_t		rtr;
+    geo_cpu_t		cpu;
+    geo_mem_t		mem;
+    char		padsize[GEOID_SIZE];
+} geoid_t;
+
+
+/* Preprocessor macros */
+
+#define GEO_MAX_LEN	48	/* max. formatted length, plus some pad:
+				   module/001c07/slab/5/node/memory/2/slot/4 */
+
+/* Values for geo_type_t */
+#define GEO_TYPE_INVALID	0
+#define GEO_TYPE_MODULE		1
+#define GEO_TYPE_NODE		2
+#define GEO_TYPE_RTR		3
+#define GEO_TYPE_IOCNTL		4
+#define GEO_TYPE_IOCARD		5
+#define GEO_TYPE_CPU		6
+#define GEO_TYPE_MEM		7
+#define GEO_TYPE_MAX		(GEO_TYPE_MEM+1)
+
+/* Parameter for hwcfg_format_geoid_compt() */
+#define GEO_COMPT_MODULE	1
+#define GEO_COMPT_SLAB		2
+#define GEO_COMPT_IOBUS		3
+#define GEO_COMPT_IOSLOT	4
+#define GEO_COMPT_CPU		5
+#define GEO_COMPT_MEMBUS	6
+#define GEO_COMPT_MEMSLOT	7
+
+#define GEO_INVALID_STR		"<invalid>"
+
+#define INVALID_NASID           ((nasid_t)-1)
+#define INVALID_CNODEID         ((cnodeid_t)-1)
+#define INVALID_PNODEID         ((pnodeid_t)-1)
+#define INVALID_SLAB            (slabid_t)-1
+#define INVALID_SLOT            (slotid_t)-1
+#define INVALID_MODULE          ((moduleid_t)-1)
+
+static inline slabid_t geo_slab(geoid_t g)
+{
+	return (g.common.type == GEO_TYPE_INVALID) ?
+		INVALID_SLAB : g.common.slab;
+}
+
+static inline slotid_t geo_slot(geoid_t g)
+{
+	return (g.common.type == GEO_TYPE_INVALID) ?
+		INVALID_SLOT : g.common.slot;
+}
+
+static inline moduleid_t geo_module(geoid_t g)
+{
+	return (g.common.type == GEO_TYPE_INVALID) ?
+		INVALID_MODULE : g.common.module;
+}
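+
+/*
+ * Illustrative sketch, not part of the original file: checking a geoid
+ * before trusting its fields.  An invalid geoid makes the accessors
+ * above return the INVALID_* sentinels; the example_ name is
+ * hypothetical.
+ */
+static inline int
+example_geoid_is_valid(geoid_t g)
+{
+	return geo_module(g) != INVALID_MODULE &&
+	       geo_slab(g) != INVALID_SLAB &&
+	       geo_slot(g) != INVALID_SLOT;
+}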
+
+extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
+
+#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/arch/ia64/include/asm/sn/intr.h b/arch/ia64/include/asm/sn/intr.h
new file mode 100644
index 0000000..e0487aa
--- /dev/null
+++ b/arch/ia64/include/asm/sn/intr.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#include <linux/rcupdate.h>
+#include <asm/sn/types.h>
+
+#define SGI_UART_VECTOR		0xe9
+
+/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
+#define SGI_XPC_ACTIVATE	0x30
+#define SGI_II_ERROR		0x31
+#define SGI_XBOW_ERROR		0x32
+#define SGI_PCIASIC_ERROR	0x33
+#define SGI_ACPI_SCI_INT	0x34
+#define SGI_TIOCA_ERROR		0x35
+#define SGI_TIO_ERROR		0x36
+#define SGI_TIOCX_ERROR		0x37
+#define SGI_MMTIMER_VECTOR	0x38
+#define SGI_XPC_NOTIFY		0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR	0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR	0xe6
+
+#define SN2_IRQ_RESERVED	0x1
+#define SN2_IRQ_CONNECTED	0x2
+#define SN2_IRQ_SHARED		0x4
+
+/* The SN PROM irq struct */
+struct sn_irq_info {
+	struct sn_irq_info *irq_next;	/* deprecated DO NOT USE     */
+	short		irq_nasid;	/* Nasid IRQ is assigned to  */
+	int		irq_slice;	/* slice IRQ is assigned to  */
+	int		irq_cpuid;	/* kernel logical cpuid	     */
+	int		irq_irq;	/* the IRQ number */
+	int		irq_int_bit;	/* Bridge interrupt pin */
+					/* <0 means MSI */
+	u64	irq_xtalkaddr;	/* xtalkaddr IRQ is sent to  */
+	int		irq_bridge_type;/* pciio asic type (pciio.h) */
+	void	       *irq_bridge;	/* bridge generating irq     */
+	void	       *irq_pciioinfo;	/* associated pciio_info_t   */
+	int		irq_last_intr;	/* For Shub lb lost intr WAR */
+	int		irq_cookie;	/* unique cookie 	     */
+	int		irq_flags;	/* flags */
+	int		irq_share_cnt;	/* num devices sharing IRQ   */
+	struct list_head	list;	/* list of sn_irq_info structs */
+	struct rcu_head		rcu;	/* rcu callback list */
+};
+
+extern void sn_send_IPI_phys(int, long, int, int);
+extern u64 sn_intr_alloc(nasid_t, int,
+			      struct sn_irq_info *,
+			      int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern void sn_set_err_irq_affinity(unsigned int);
+extern struct list_head **sn_irq_lh;
+
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+
+#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/arch/ia64/include/asm/sn/io.h b/arch/ia64/include/asm/sn/io.h
new file mode 100644
index 0000000..41c73a7
--- /dev/null
+++ b/arch/ia64/include/asm/sn/io.h
@@ -0,0 +1,274 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void __sn_mmiowb(void); /* Forward definition */
+
+extern int num_cnodes;
+
+#define __sn_mf_a()   ia64_mfa()
+
+extern void sn_dma_flush(unsigned long);
+
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+#define __sn_readb_relaxed ___sn_readb_relaxed
+#define __sn_readw_relaxed ___sn_readw_relaxed
+#define __sn_readl_relaxed ___sn_readl_relaxed
+#define __sn_readq_relaxed ___sn_readq_relaxed
+
+/*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the inX/outX set of macros.  The SN Platform
+ * inX set of macros ensures that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned int
+___sn_inb (unsigned long port)
+{
+	volatile unsigned char *addr;
+	unsigned char ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline unsigned int
+___sn_inw (unsigned long port)
+{
+	volatile unsigned short *addr;
+	unsigned short ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline unsigned int
+___sn_inl (unsigned long port)
+{
+	volatile unsigned int *addr;
+	unsigned int ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline void
+___sn_outb (unsigned char val, unsigned long port)
+{
+	volatile unsigned char *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+static inline void
+___sn_outw (unsigned short val, unsigned long port)
+{
+	volatile unsigned short *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+static inline void
+___sn_outl (unsigned int val, unsigned long port)
+{
+	volatile unsigned int *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the readX/writeX set of macros.  The SN Platform
+ * readX set of macros ensures that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned char
+___sn_readb (const volatile void __iomem *addr)
+{
+	unsigned char val;
+
+	val = *(volatile unsigned char __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned short
+___sn_readw (const volatile void __iomem *addr)
+{
+	unsigned short val;
+
+	val = *(volatile unsigned short __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned int
+___sn_readl (const volatile void __iomem *addr)
+{
+	unsigned int val;
+
+	val = *(volatile unsigned int __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned long
+___sn_readq (const volatile void __iomem *addr)
+{
+	unsigned long val;
+
+	val = *(volatile unsigned long __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+/*
+ * For generic and SN2 kernels, we have a set of fast access
+ * PIO macros.	These macros are provided on the SN Platform
+ * because the normal inX and readX macros perform the
+ * additional task of flushing posted DMA requests on the Bridge.
+ *
+ * These routines should be self-explanatory.
+ */
+
+static inline unsigned int
+sn_inb_fast (unsigned long port)
+{
+	volatile unsigned char *addr = (unsigned char *)port;
+	unsigned char ret;
+
+	ret = *addr;
+	__sn_mf_a();
+	return ret;
+}
+
+static inline unsigned int
+sn_inw_fast (unsigned long port)
+{
+	volatile unsigned short *addr = (unsigned short *)port;
+	unsigned short ret;
+
+	ret = *addr;
+	__sn_mf_a();
+	return ret;
+}
+
+static inline unsigned int
+sn_inl_fast (unsigned long port)
+{
+	volatile unsigned int *addr = (unsigned int *)port;
+	unsigned int ret;
+
+	ret = *addr;
+	__sn_mf_a();
+	return ret;
+}
+
+static inline unsigned char
+___sn_readb_relaxed (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___sn_readw_relaxed (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___sn_readl_relaxed (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___sn_readq_relaxed (const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long __force *) addr;
+}
+
+struct pci_dev;
+
+static inline int
+sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
+{
+
+	if (vchan > 1) {
+		return -1;
+	}
+
+	if (!(*addr >> 32))	/* Using a mask here would be cleaner */
+		return 0;	/* but this generates better code */
+
+	if (vchan == 1) {
+		/* Set Bit 57 */
+		*addr |= (1UL << 57);
+	} else {
+		/* Clear Bit 57 */
+		*addr &= ~(1UL << 57);
+	}
+
+	return 0;
+}
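+
+/*
+ * Illustrative sketch, not part of the original file: requesting virtual
+ * channel 1 for a 64-bit DMA address.  The example_ names are
+ * hypothetical.
+ */
+static inline int
+example_request_vchan1(struct pci_dev *example_dev, unsigned long *example_addr)
+{
+	/* Returns -1 for an invalid channel; 32-bit addresses pass
+	 * through unchanged since bit 57 only exists in 64-bit space. */
+	return sn_pci_set_vchan(example_dev, example_addr, 1);
+}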
+
+#endif	/* _ASM_SN_IO_H */
diff --git a/arch/ia64/include/asm/sn/ioc3.h b/arch/ia64/include/asm/sn/ioc3.h
new file mode 100644
index 0000000..95ed6cc
--- /dev/null
+++ b/arch/ia64/include/asm/sn/ioc3.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ */
+#ifndef IA64_SN_IOC3_H
+#define IA64_SN_IOC3_H
+
+/* serial port register map */
+struct ioc3_serialregs {
+	uint32_t sscr;
+	uint32_t stpir;
+	uint32_t stcir;
+	uint32_t srpir;
+	uint32_t srcir;
+	uint32_t srtr;
+	uint32_t shadow;
+};
+
+/* SUPERIO uart register map */
+struct ioc3_uartregs {
+	char iu_lcr;
+	union {
+		char iir;	/* read only */
+		char fcr;	/* write only */
+	} u3;
+	union {
+		char ier;	/* DLAB == 0 */
+		char dlm;	/* DLAB == 1 */
+	} u2;
+	union {
+		char rbr;	/* read only, DLAB == 0 */
+		char thr;	/* write only, DLAB == 0 */
+		char dll;	/* DLAB == 1 */
+	} u1;
+	char iu_scr;
+	char iu_msr;
+	char iu_lsr;
+	char iu_mcr;
+};
+
+#define iu_rbr u1.rbr
+#define iu_thr u1.thr
+#define iu_dll u1.dll
+#define iu_ier u2.ier
+#define iu_dlm u2.dlm
+#define iu_iir u3.iir
+#define iu_fcr u3.fcr
+
+struct ioc3_sioregs {
+	char fill[0x170];
+	struct ioc3_uartregs uartb;
+	struct ioc3_uartregs uarta;
+};
+
+/* PCI IO/mem space register map */
+struct ioc3 {
+	uint32_t pci_id;
+	uint32_t pci_scr;
+	uint32_t pci_rev;
+	uint32_t pci_lat;
+	uint32_t pci_addr;
+	uint32_t pci_err_addr_l;
+	uint32_t pci_err_addr_h;
+
+	uint32_t sio_ir;
+	/* these registers are read-only for general kernel code. To
+	 * modify them use the functions in ioc3.c
+	 */
+	uint32_t sio_ies;
+	uint32_t sio_iec;
+	uint32_t sio_cr;
+	uint32_t int_out;
+	uint32_t mcr;
+	uint32_t gpcr_s;
+	uint32_t gpcr_c;
+	uint32_t gpdr;
+	uint32_t gppr[9];
+	char fill[0x4c];
+
+	/* serial port registers */
+	uint32_t sbbr_h;
+	uint32_t sbbr_l;
+
+	struct ioc3_serialregs port_a;
+	struct ioc3_serialregs port_b;
+	char fill1[0x1ff10];
+	/* superio registers */
+	struct ioc3_sioregs sregs;
+};
+
+/* These don't exist on the ioc3 serial card... */
+#define eier	fill1[8]
+#define eisr	fill1[4]
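+
+/*
+ * Illustrative sketch, not part of the original file: the iu_ aliases
+ * above let the UART fields be addressed by their 16550 names, e.g.
+ * pulling a received byte from port A.  The example_ name is
+ * hypothetical; a real driver would go through the mapped-I/O accessors.
+ */
+static inline char
+example_ioc3_uarta_rx(struct ioc3 *ioc3)
+{
+	return ioc3->sregs.uarta.iu_rbr;
+}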
+
+#define PCI_LAT			0xc	/* Latency Timer */
+#define PCI_SCR_DROP_MODE_EN	0x00008000 /* drop pios on parity err */
+#define UARTA_BASE		0x178
+#define UARTB_BASE		0x170
+
+
+/* bitmasks for serial RX status byte */
+#define RXSB_OVERRUN		0x01	/* char(s) lost */
+#define RXSB_PAR_ERR		0x02	/* parity error */
+#define RXSB_FRAME_ERR		0x04	/* framing error */
+#define RXSB_BREAK		0x08	/* break character */
+#define RXSB_CTS		0x10	/* state of CTS */
+#define RXSB_DCD		0x20	/* state of DCD */
+#define RXSB_MODEM_VALID	0x40	/* DCD, CTS and OVERRUN are valid */
+#define RXSB_DATA_VALID		0x80	/* FRAME_ERR PAR_ERR & BREAK valid */
+
+/* bitmasks for serial TX control byte */
+#define TXCB_INT_WHEN_DONE	0x20	/* interrupt after this byte is sent */
+#define TXCB_INVALID		0x00	/* byte is invalid */
+#define TXCB_VALID		0x40	/* byte is valid */
+#define TXCB_MCR		0x80	/* data<7:0> to modem cntrl register */
+#define TXCB_DELAY		0xc0	/* delay data<7:0> mSec */
+
+/* bitmasks for SBBR_L */
+#define SBBR_L_SIZE		0x00000001	/* 0 1KB rings, 1 4KB rings */
+
+/* bitmasks for SSCR_<A:B> */
+#define SSCR_RX_THRESHOLD	0x000001ff	/* hiwater mark */
+#define SSCR_TX_TIMER_BUSY	0x00010000	/* TX timer in progress */
+#define SSCR_HFC_EN		0x00020000	/* h/w flow cntrl enabled */
+#define SSCR_RX_RING_DCD	0x00040000	/* postRX record on delta-DCD */
+#define SSCR_RX_RING_CTS	0x00080000	/* postRX record on delta-CTS */
+#define SSCR_HIGH_SPD		0x00100000	/* 4X speed */
+#define SSCR_DIAG		0x00200000	/* bypass clock divider */
+#define SSCR_RX_DRAIN		0x08000000	/* drain RX buffer to memory */
+#define SSCR_DMA_EN		0x10000000	/* enable ring buffer DMA */
+#define SSCR_DMA_PAUSE		0x20000000	/* pause DMA */
+#define SSCR_PAUSE_STATE	0x40000000	/* set when PAUSE takes effect*/
+#define SSCR_RESET		0x80000000	/* reset DMA channels */
+
+/* all producer/consumer pointers are the same bitfield */
+#define PROD_CONS_PTR_4K	0x00000ff8	/* for 4K buffers */
+#define PROD_CONS_PTR_1K	0x000003f8	/* for 1K buffers */
+#define PROD_CONS_PTR_OFF	3
+
+/* bitmasks for SRCIR_<A:B> */
+#define SRCIR_ARM		0x80000000	/* arm RX timer */
+
+/* bitmasks for SHADOW_<A:B> */
+#define SHADOW_DR		0x00000001	/* data ready */
+#define SHADOW_OE		0x00000002	/* overrun error */
+#define SHADOW_PE		0x00000004	/* parity error */
+#define SHADOW_FE		0x00000008	/* framing error */
+#define SHADOW_BI		0x00000010	/* break interrupt */
+#define SHADOW_THRE		0x00000020	/* transmit holding reg empty */
+#define SHADOW_TEMT		0x00000040	/* transmit shift reg empty */
+#define SHADOW_RFCE		0x00000080	/* char in RX fifo has error */
+#define SHADOW_DCTS		0x00010000	/* delta clear to send */
+#define SHADOW_DDCD		0x00080000	/* delta data carrier detect */
+#define SHADOW_CTS		0x00100000	/* clear to send */
+#define SHADOW_DCD		0x00800000	/* data carrier detect */
+#define SHADOW_DTR		0x01000000	/* data terminal ready */
+#define SHADOW_RTS		0x02000000	/* request to send */
+#define SHADOW_OUT1		0x04000000	/* 16550 OUT1 bit */
+#define SHADOW_OUT2		0x08000000	/* 16550 OUT2 bit */
+#define SHADOW_LOOP		0x10000000	/* loopback enabled */
+
+/* bitmasks for SRTR_<A:B> */
+#define SRTR_CNT		0x00000fff	/* reload value for RX timer */
+#define SRTR_CNT_VAL		0x0fff0000	/* current value of RX timer */
+#define SRTR_CNT_VAL_SHIFT	16
+#define SRTR_HZ			16000		/* SRTR clock frequency */
+
+/* bitmasks for SIO_IR, SIO_IEC and SIO_IES  */
+#define SIO_IR_SA_TX_MT		0x00000001	/* Serial port A TX empty */
+#define SIO_IR_SA_RX_FULL	0x00000002	/* port A RX buf full */
+#define SIO_IR_SA_RX_HIGH	0x00000004	/* port A RX hiwat */
+#define SIO_IR_SA_RX_TIMER	0x00000008	/* port A RX timeout */
+#define SIO_IR_SA_DELTA_DCD	0x00000010	/* port A delta DCD */
+#define SIO_IR_SA_DELTA_CTS	0x00000020	/* port A delta CTS */
+#define SIO_IR_SA_INT		0x00000040	/* port A pass-thru intr */
+#define SIO_IR_SA_TX_EXPLICIT	0x00000080	/* port A explicit TX thru */
+#define SIO_IR_SA_MEMERR	0x00000100	/* port A PCI error */
+#define SIO_IR_SB_TX_MT		0x00000200
+#define SIO_IR_SB_RX_FULL	0x00000400
+#define SIO_IR_SB_RX_HIGH	0x00000800
+#define SIO_IR_SB_RX_TIMER	0x00001000
+#define SIO_IR_SB_DELTA_DCD	0x00002000
+#define SIO_IR_SB_DELTA_CTS	0x00004000
+#define SIO_IR_SB_INT		0x00008000
+#define SIO_IR_SB_TX_EXPLICIT	0x00010000
+#define SIO_IR_SB_MEMERR	0x00020000
+#define SIO_IR_PP_INT		0x00040000	/* P port pass-thru intr */
+#define SIO_IR_PP_INTA		0x00080000	/* PP context A thru */
+#define SIO_IR_PP_INTB		0x00100000	/* PP context B thru */
+#define SIO_IR_PP_MEMERR	0x00200000	/* PP PCI error */
+#define SIO_IR_KBD_INT		0x00400000	/* kbd/mouse intr */
+#define SIO_IR_RT_INT		0x08000000	/* RT output pulse */
+#define SIO_IR_GEN_INT1		0x10000000	/* RT input pulse */
+#define SIO_IR_GEN_INT_SHIFT	28
+
+/* per device interrupt masks */
+#define SIO_IR_SA		(SIO_IR_SA_TX_MT | \
+				 SIO_IR_SA_RX_FULL | \
+				 SIO_IR_SA_RX_HIGH | \
+				 SIO_IR_SA_RX_TIMER | \
+				 SIO_IR_SA_DELTA_DCD | \
+				 SIO_IR_SA_DELTA_CTS | \
+				 SIO_IR_SA_INT | \
+				 SIO_IR_SA_TX_EXPLICIT | \
+				 SIO_IR_SA_MEMERR)
+
+#define SIO_IR_SB		(SIO_IR_SB_TX_MT | \
+				 SIO_IR_SB_RX_FULL | \
+				 SIO_IR_SB_RX_HIGH | \
+				 SIO_IR_SB_RX_TIMER | \
+				 SIO_IR_SB_DELTA_DCD | \
+				 SIO_IR_SB_DELTA_CTS | \
+				 SIO_IR_SB_INT | \
+				 SIO_IR_SB_TX_EXPLICIT | \
+				 SIO_IR_SB_MEMERR)
+
+#define SIO_IR_PP		(SIO_IR_PP_INT | SIO_IR_PP_INTA | \
+				 SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
+#define SIO_IR_RT		(SIO_IR_RT_INT | SIO_IR_GEN_INT1)
+
+/* bitmasks for SIO_CR */
+#define SIO_CR_CMD_PULSE_SHIFT 15
+#define SIO_CR_SER_A_BASE_SHIFT 1
+#define SIO_CR_SER_B_BASE_SHIFT 8
+#define SIO_CR_ARB_DIAG		0x00380000	/* cur !enet PCI request (ro) */
+#define SIO_CR_ARB_DIAG_TXA	0x00000000
+#define SIO_CR_ARB_DIAG_RXA	0x00080000
+#define SIO_CR_ARB_DIAG_TXB	0x00100000
+#define SIO_CR_ARB_DIAG_RXB	0x00180000
+#define SIO_CR_ARB_DIAG_PP	0x00200000
+#define SIO_CR_ARB_DIAG_IDLE	0x00400000	/* 0 -> active request (ro) */
+
+/* defs for some of the generic I/O pins */
+#define GPCR_PHY_RESET		0x20	/* pin is output to PHY reset */
+#define GPCR_UARTB_MODESEL	0x40	/* pin is output to port B mode sel */
+#define GPCR_UARTA_MODESEL	0x80	/* pin is output to port A mode sel */
+
+#define GPPR_PHY_RESET_PIN	5	/* GIO pin controlling phy reset */
+#define GPPR_UARTB_MODESEL_PIN	6	/* GIO pin cntrling uartb modeselect */
+#define GPPR_UARTA_MODESEL_PIN	7	/* GIO pin cntrling uarta modeselect */
+
+#endif /* IA64_SN_IOC3_H */
diff --git a/arch/ia64/include/asm/sn/klconfig.h b/arch/ia64/include/asm/sn/klconfig.h
new file mode 100644
index 0000000..bcbf209
--- /dev/null
+++ b/arch/ia64/include/asm/sn/klconfig.h
@@ -0,0 +1,246 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_KLCONFIG_H
+#define _ASM_IA64_SN_KLCONFIG_H
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+typedef s32 klconf_off_t;
+
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+	char		pad[20];
+	klconf_off_t	ch_board_info;	/* the link list of boards */
+	char		pad0[88];
+} kl_config_hdr_t;
+
+
+#define NODE_OFFSET_TO_LBOARD(nasid,off)        (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to 
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node. (TBD - need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs (refer to the
+ * figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is an lboard, then the config info of this board will be found
+ * on the local node (LOCAL NODE BASE + offset value gives a pointer to
+ * the structure).
+ * If it is an rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and 
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
+ * first board info on the remote node. The remote node list is
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ * 
+ KLCONFIG
+
+ +------------+      +------------+      +------------+      +------------+
+ |  lboard    |  +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | board info |  |   | board info |  |   |errinfo,bptr|  |   | board info |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | offset     |--+   |  offset    |--+   |  offset    |--+   |offset=NULL |
+ +------------+      +------------+      +------------+      +------------+
+
+
+ +------------+
+ | board info |
+ +------------+       +--------------------------------+
+ | compt 1    |------>| type, rev, diaginfo, size ...  |  (CPU)
+ +------------+       +--------------------------------+
+ | compt 2    |--+
+ +------------+  |    +--------------------------------+
+ |  ...       |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
+ +------------+       +--------------------------------+
+ | errinfo    |--+
+ +------------+  |    +--------------------------------+
+                 +--->|r/l brd errinfo,compt err flags |
+                      +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has 
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error 
+ * information specific to the COMPONENT is present in the respective 
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, i.e. the
+ * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is 
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where 
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info 
+ * which is present on the REMOTE NODE. (TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes
+ * or on all the nearest nodes. (TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this data organization, a set of
+ * interface routines is provided (TBD). The important thing to remember while
+ * manipulating the structures is that the NODE number information should
+ * be used: if the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR, else it should be added to the LOCAL BASE ADDR.
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ * 
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the network (TBD).
+ *
+ * The data structures below define the above concepts.
+ */
+
+
+/*
+ * BOARD classes
+ */
+
+#define KLCLASS_MASK	0xf0   
+#define KLCLASS_NONE	0x00
+#define KLCLASS_NODE	0x10             /* CPU, Memory and HUB board */
+#define KLCLASS_CPU	KLCLASS_NODE	
+#define KLCLASS_IO	0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI 
+					    and the non-graphics widget boards */
+#define KLCLASS_ROUTER	0x30             /* Router board */
+#define KLCLASS_MIDPLANE 0x40            /* We need to treat this as a board
+                                            so that we can record error info */
+#define KLCLASS_IOBRICK	0x70		/* IP35 iobrick */
+#define KLCLASS_MAX	8		/* Bump this if a new CLASS is added */
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+
+/*
+ * board types
+ */
+
+#define KLTYPE_MASK	0x0f
+#define KLTYPE(_x)      ((_x) & KLTYPE_MASK)
+
+#define KLTYPE_SNIA	(KLCLASS_CPU | 0x1)
+#define KLTYPE_TIO	(KLCLASS_CPU | 0x2)
+
+#define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
+
+#define KLTYPE_IOBRICK_XBOW	(KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK		(KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_NBRICK		(KLCLASS_IOBRICK | 0x4)
+#define KLTYPE_PXBRICK		(KLCLASS_IOBRICK | 0x6)
+#define KLTYPE_IXBRICK		(KLCLASS_IOBRICK | 0x7)
+#define KLTYPE_CGBRICK		(KLCLASS_IOBRICK | 0x8)
+#define KLTYPE_OPUSBRICK	(KLCLASS_IOBRICK | 0x9)
+#define KLTYPE_SABRICK          (KLCLASS_IOBRICK | 0xa)
+#define KLTYPE_IABRICK		(KLCLASS_IOBRICK | 0xb)
+#define KLTYPE_PABRICK          (KLCLASS_IOBRICK | 0xc)
+#define KLTYPE_GABRICK		(KLCLASS_IOBRICK | 0xd)
+
+
+/* 
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+typedef struct lboard_s {
+	klconf_off_t 	brd_next_any;     /* Next BOARD */
+	unsigned char 	struct_type;      /* type of structure, local or remote */
+	unsigned char 	brd_type;         /* type+class */
+	unsigned char 	brd_sversion;     /* version of this structure */
+        unsigned char 	brd_brevision;    /* board revision */
+        unsigned char 	brd_promver;      /* board prom version, if any */
+ 	unsigned char 	brd_flags;        /* Enabled, Disabled etc */
+	unsigned char 	brd_slot;         /* slot number */
+	unsigned short	brd_debugsw;      /* Debug switches */
+	geoid_t		brd_geoid;	  /* geo id */
+	partid_t 	brd_partition;    /* Partition number */
+        unsigned short 	brd_diagval;      /* diagnostic value */
+        unsigned short 	brd_diagparm;     /* diagnostic parameter */
+        unsigned char 	brd_inventory;    /* inventory history */
+        unsigned char 	brd_numcompts;    /* Number of components */
+        nic_t         	brd_nic;          /* Number in CAN */
+	nasid_t		brd_nasid;        /* passed parameter */
+	klconf_off_t 	brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+	klconf_off_t 	brd_errinfo;      /* Board's error information */
+	struct lboard_s *brd_parent;	  /* Logical parent for this brd */
+	char            pad0[4];
+	unsigned char	brd_confidence;	  /* confidence that the board is bad */
+	nasid_t		brd_owner;        /* who owns this board */
+	unsigned char 	brd_nic_flags;    /* To handle 8 more NICs */
+	char		pad1[24];	  /* future expansion */
+	char		brd_name[32];
+	nasid_t		brd_next_same_host; /* host of next brd w/same nasid */
+	klconf_off_t	brd_next_same;    /* Next BOARD with same nasid */
+} lboard_t;
+
+/*
+ * Generic info structure. This stores common info about a 
+ * component.
+ */
+ 
+typedef struct klinfo_s {                  /* Generic info */
+        unsigned char   struct_type;       /* type of this structure */
+        unsigned char   struct_version;    /* version of this structure */
+        unsigned char   flags;            /* Enabled, disabled etc */
+        unsigned char   revision;         /* component revision */
+        unsigned short  diagval;          /* result of diagnostics */
+        unsigned short  diagparm;         /* diagnostic parameter */
+        unsigned char   inventory;        /* previous inventory status */
+        unsigned short  partid;		   /* widget part number */
+	nic_t 		nic;              /* Must be aligned properly */
+        unsigned char   physid;           /* physical id of component */
+        unsigned int    virtid;           /* virtual id as seen by system */
+	unsigned char	widid;	          /* Widget id - if applicable */
+	nasid_t		nasid;            /* node number - from parent */
+	char		pad1;		  /* pad out structure. */
+	char		pad2;		  /* pad out structure. */
+	void		*data;
+        klconf_off_t	errinfo;          /* component specific errors */
+        unsigned short  pad3;             /* pci fields have moved over to */
+        unsigned short  pad4;             /* klbri_t */
+} klinfo_t ;
+
+
+static inline lboard_t *find_lboard_next(lboard_t * brd)
+{
+	if (brd && brd->brd_next_any)
+		return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
+        return NULL;
+}
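+
+/*
+ * Illustrative sketch: walking the board list with find_lboard_next().
+ * The starting lboard_t pointer would come from a node's
+ * kl_config_hdr_t via its ch_board_info offset; the helper name below
+ * is hypothetical.
+ */
+static inline lboard_t *find_lboard_of_class(lboard_t *start, unsigned char class)
+{
+	lboard_t *brd;
+
+	/* Follow brd_next_any links until a board of the wanted class turns up. */
+	for (brd = start; brd != NULL; brd = find_lboard_next(brd))
+		if (KLCLASS(brd->brd_type) == class)
+			return brd;
+	return NULL;
+}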
+
+#endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/arch/ia64/include/asm/sn/l1.h b/arch/ia64/include/asm/sn/l1.h
new file mode 100644
index 0000000..344bf44
--- /dev/null
+++ b/arch/ia64/include/asm/sn/l1.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#ifndef _ASM_IA64_SN_L1_H
+#define _ASM_IA64_SN_L1_H
+
+/* brick type response codes */
+#define L1_BRICKTYPE_PX         0x23            /* # */
+#define L1_BRICKTYPE_PE         0x25            /* % */
+#define L1_BRICKTYPE_N_p0       0x26            /* & */
+#define L1_BRICKTYPE_IP45       0x34            /* 4 */
+#define L1_BRICKTYPE_IP41       0x35            /* 5 */
+#define L1_BRICKTYPE_TWISTER    0x36            /* 6 */ /* IP53 & ROUTER */
+#define L1_BRICKTYPE_IX         0x3d            /* = */
+#define L1_BRICKTYPE_IP34       0x61            /* a */
+#define L1_BRICKTYPE_GA		0x62            /* b */
+#define L1_BRICKTYPE_C          0x63            /* c */
+#define L1_BRICKTYPE_OPUS_TIO	0x66		/* f */
+#define L1_BRICKTYPE_I          0x69            /* i */
+#define L1_BRICKTYPE_N          0x6e            /* n */
+#define L1_BRICKTYPE_OPUS       0x6f		/* o */
+#define L1_BRICKTYPE_P          0x70            /* p */
+#define L1_BRICKTYPE_R          0x72            /* r */
+#define L1_BRICKTYPE_CHI_CG     0x76            /* v */
+#define L1_BRICKTYPE_X          0x78            /* x */
+#define L1_BRICKTYPE_X2         0x79            /* y */
+#define L1_BRICKTYPE_SA		0x5e            /* ^ */
+#define L1_BRICKTYPE_PA		0x6a            /* j */
+#define L1_BRICKTYPE_IA		0x6b            /* k */
+#define L1_BRICKTYPE_ATHENA	0x2b            /* + */
+#define L1_BRICKTYPE_DAYTONA	0x7a            /* z */
+#define L1_BRICKTYPE_1932	0x2c		/* . */
+#define L1_BRICKTYPE_191010	0x2e		/* , */
+
+/* board type response codes */
+#define L1_BOARDTYPE_IP69       0x0100          /* CA */
+#define L1_BOARDTYPE_IP63       0x0200          /* CB */
+#define L1_BOARDTYPE_BASEIO     0x0300          /* IB */
+#define L1_BOARDTYPE_PCIE2SLOT  0x0400          /* IC */
+#define L1_BOARDTYPE_PCIX3SLOT  0x0500          /* ID */
+#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600       /* IE */
+#define L1_BOARDTYPE_ABACUS     0x0700          /* AB */
+#define L1_BOARDTYPE_DAYTONA    0x0800          /* AD */
+#define L1_BOARDTYPE_INVAL      (-1)            /* invalid brick type */
+
+#endif /* _ASM_IA64_SN_L1_H */
diff --git a/arch/ia64/include/asm/sn/leds.h b/arch/ia64/include/asm/sn/leds.h
new file mode 100644
index 0000000..66cf8c4
--- /dev/null
+++ b/arch/ia64/include/asm/sn/leds.h
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_LEDS_H
+#define _ASM_IA64_SN_LEDS_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shub_mmr.h>
+
+#define LED0		(LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
+#define LED_CPU_SHIFT	16
+
+#define LED_CPU_HEARTBEAT	0x01
+#define LED_CPU_ACTIVITY	0x02
+#define LED_ALWAYS_SET		0x00
+
+/*
+ * Basic macros for flashing the LEDS on an SGI SN.
+ */
+
+static __inline__ void
+set_led_bits(u8 value, u8 mask)
+{
+	pda->led_state = (pda->led_state & ~mask) | (value & mask);
+	*pda->led_address = (short) pda->led_state;
+}
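+
+/*
+ * Illustrative sketch: a heartbeat blink using set_led_bits().  The
+ * helper name is hypothetical; a periodic caller would invert just the
+ * heartbeat bit, the mask leaving the other LED bits alone.
+ */
+static __inline__ void
+example_led_heartbeat(void)
+{
+	set_led_bits(~pda->led_state, LED_CPU_HEARTBEAT);
+}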
+
+#endif /* _ASM_IA64_SN_LEDS_H */
+
diff --git a/arch/ia64/include/asm/sn/module.h b/arch/ia64/include/asm/sn/module.h
new file mode 100644
index 0000000..734e980
--- /dev/null
+++ b/arch/ia64/include/asm/sn/module.h
@@ -0,0 +1,127 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_MODULE_H
+#define _ASM_IA64_SN_MODULE_H
+
+/* parameter for format_module_id() */
+#define MODULE_FORMAT_BRIEF	1
+#define MODULE_FORMAT_LONG	2
+#define MODULE_FORMAT_LCD	3
+
+/*
+ *	Module id format
+ *
+ *	31-16	Rack ID (encoded class, group, number - 16-bit unsigned int)
+ *	 15-8	Brick type (8-bit ascii character)
+ *	  7-0	Bay (brick position in rack (0-63) - 8-bit unsigned int)
+ *
+ */
+
+/*
+ * Macros for getting the brick type
+ */
+#define MODULE_BTYPE_MASK	0xff00
+#define MODULE_BTYPE_SHFT	8
+#define MODULE_GET_BTYPE(_m)	(((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
+#define MODULE_BT_TO_CHAR(_b)	((char)(_b))
+#define MODULE_GET_BTCHAR(_m)	(MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
+
+/*
+ * Macros for getting the rack ID.
+ */
+#define MODULE_RACK_MASK	0xffff0000
+#define MODULE_RACK_SHFT	16
+#define MODULE_GET_RACK(_m)	(((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
+
+/*
+ * Macros for getting the brick position
+ */
+#define MODULE_BPOS_MASK	0x00ff
+#define MODULE_BPOS_SHFT	0
+#define MODULE_GET_BPOS(_m)	(((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
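+
+/*
+ * Illustrative sketch: decoding a module id with the accessor macros
+ * above.  For a module id of 0x007b4903: rack 123 (0x007b), brick
+ * type 'I' (0x49), bay 3.  The helper name is hypothetical.
+ */
+static inline void
+example_decode_moduleid(unsigned int m, unsigned int *rack, char *btype,
+			unsigned int *bay)
+{
+	*rack  = MODULE_GET_RACK(m);	/* bits 31-16 */
+	*btype = MODULE_GET_BTCHAR(m);	/* bits 15-8, as ascii */
+	*bay   = MODULE_GET_BPOS(m);	/* bits 7-0 */
+}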
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ *   class (0==CPU/mixed, 1==I/O), group, number
+ *
+ * Rack number is stored just as it is displayed on the screen:
+ * a 3-decimal-digit number.
+ */
+#define RACK_CLASS_DVDR         100
+#define RACK_GROUP_DVDR         10
+#define RACK_NUM_DVDR           1
+
+#define RACK_CREATE_RACKID(_c, _g, _n)  ((_c) * RACK_CLASS_DVDR +       \
+        (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
+
+#define RACK_GET_CLASS(_r)              ((_r) / RACK_CLASS_DVDR)
+#define RACK_GET_GROUP(_r)              (((_r) - RACK_GET_CLASS(_r) *   \
+            RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
+#define RACK_GET_NUM(_r)                (((_r) - RACK_GET_CLASS(_r) *   \
+            RACK_CLASS_DVDR - RACK_GET_GROUP(_r) *      \
+            RACK_GROUP_DVDR) / RACK_NUM_DVDR)
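+
+/*
+ * Worked example: RACK_CREATE_RACKID(1, 0, 2) yields 102, i.e. an
+ * I/O-class rack displayed as "102".  Decoding recovers the fields:
+ * RACK_GET_CLASS(102) == 1, RACK_GET_GROUP(102) == 0 and
+ * RACK_GET_NUM(102) == 2.
+ */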
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ *   class      1 bit, 0==CPU/mixed, 1==I/O
+ *   group      2 bits for CPU/mixed, 3 bits for I/O
+ *   number     3 bits for CPU/mixed, 2 bits for I/O (1 based)
+ */
+#define RACK_GROUP_BITS(_r)     (RACK_GET_CLASS(_r) ? 3 : 2)
+#define RACK_NUM_BITS(_r)       (RACK_GET_CLASS(_r) ? 2 : 3)
+
+#define RACK_CLASS_MASK(_r)     0x20
+#define RACK_CLASS_SHFT(_r)     5
+#define RACK_ADD_CLASS(_r, _c)  \
+        ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
+
+#define RACK_GROUP_SHFT(_r)     RACK_NUM_BITS(_r)
+#define RACK_GROUP_MASK(_r)     \
+        ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
+#define RACK_ADD_GROUP(_r, _g)  \
+        ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
+
+#define RACK_NUM_SHFT(_r)       0
+#define RACK_NUM_MASK(_r)       \
+        ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
+#define RACK_ADD_NUM(_r, _n)    \
+        ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
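+
+/*
+ * Illustrative sketch: applying the RACK_ADD_* macros above in field
+ * order to build an encoded rack id.  The field widths come from
+ * RACK_GROUP_BITS()/RACK_NUM_BITS(), which reuse RACK_GET_CLASS() from
+ * the decimal form above.  The helper name is hypothetical.
+ */
+static inline unsigned char
+example_encode_rackid(unsigned int class, unsigned int group, unsigned int num)
+{
+	unsigned char r = 0;
+
+	RACK_ADD_CLASS(r, class);	/* bit 5 */
+	RACK_ADD_GROUP(r, group);
+	RACK_ADD_NUM(r, num);		/* 1-based, hence the -1 inside */
+	return r;
+}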
+
+
+/*
+ * Brick type definitions
+ */
+#define MAX_BRICK_TYPES         256 /* brick type is stored as uchar */
+
+#define MODULE_CBRICK           0
+#define MODULE_RBRICK           1
+#define MODULE_IBRICK           2
+#define MODULE_KBRICK           3
+#define MODULE_XBRICK           4
+#define MODULE_DBRICK           5
+#define MODULE_PBRICK           6
+#define MODULE_NBRICK           7
+#define MODULE_PEBRICK          8
+#define MODULE_PXBRICK          9
+#define MODULE_IXBRICK          10
+#define MODULE_CGBRICK		11
+#define MODULE_OPUSBRICK        12
+#define MODULE_SABRICK		13	/* TIO BringUp Brick */
+#define MODULE_IABRICK		14
+#define MODULE_PABRICK		15
+#define MODULE_GABRICK		16
+#define MODULE_OPUS_TIO		17	/* OPUS TIO Riser */
+
+extern char brick_types[];
+extern void format_module_id(char *, moduleid_t, int);
+
+#endif /* _ASM_IA64_SN_MODULE_H */
diff --git a/arch/ia64/include/asm/sn/mspec.h b/arch/ia64/include/asm/sn/mspec.h
new file mode 100644
index 0000000..c1d3c50
--- /dev/null
+++ b/arch/ia64/include/asm/sn/mspec.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_MSPEC_H
+#define _ASM_IA64_SN_MSPEC_H
+
+#define FETCHOP_VAR_SIZE 64 /* 64 bytes per fetchop variable */
+
+#define FETCHOP_LOAD		0
+#define FETCHOP_INCREMENT	8
+#define FETCHOP_DECREMENT	16
+#define FETCHOP_CLEAR		24
+
+#define FETCHOP_STORE		0
+#define FETCHOP_AND		24
+#define FETCHOP_OR		32
+
+#define FETCHOP_CLEAR_CACHE	56
+
+#define FETCHOP_LOAD_OP(addr, op) ( \
+         *(volatile long *)((char*) (addr) + (op)))
+
+#define FETCHOP_STORE_OP(addr, op, x) ( \
+         *(volatile long *)((char*) (addr) + (op)) = (long) (x))
+
+#ifdef __KERNEL__
+
+/*
+ * Each Atomic Memory Operation (amo, formerly known as fetchop)
+ * variable is 64 bytes long.  The first 8 bytes are used.  The
+ * remaining 56 bytes are unaddressable due to the operation taking
+ * that portion of the address.
+ *
+ * NOTE: The amo structure _MUST_ be placed in either the first or second
+ * half of the cache line.  The cache line _MUST NOT_ be used for anything
+ * other than additional amo entries.  This is because there are two
+ * addresses which reference the same physical cache line.  One will
+ * be a cached entry with the memory type bits all set.  This address
+ * may be loaded into processor cache.  The amo will be referenced
+ * uncached via the special memory type.  If any portion of the
+ * cached cache line is modified, when that line is flushed, it will
+ * overwrite the uncached value in physical memory and lead to
+ * inconsistency.
+ */
+struct amo {
+        u64 variable;
+        u64 unused[7];
+};
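+
+/*
+ * Illustrative sketch: an atomic fetch-and-increment on an amo
+ * variable via the FETCHOP_* offsets above.  Loading from the
+ * variable's address plus FETCHOP_INCREMENT returns the old value and
+ * increments it as a side effect; v is assumed to reference
+ * fetchop-capable special memory.  The helper name is hypothetical.
+ */
+static inline long
+example_amo_increment(struct amo *v)
+{
+	return FETCHOP_LOAD_OP(&v->variable, FETCHOP_INCREMENT);
+}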
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_SN_MSPEC_H */
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h
new file mode 100644
index 0000000..7c8b471
--- /dev/null
+++ b/arch/ia64/include/asm/sn/nodepda.h
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_NODEPDA_H
+#define _ASM_IA64_SN_NODEPDA_H
+
+
+#include <asm/irq.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/bte.h>
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together 
+ * all per-node data structures. 
+ */
+struct phys_cpuid {
+	short			nasid;
+	char			subnode;
+	char			slice;
+};
+
+struct nodepda_s {
+	void 		*pdinfo;	/* Platform-dependent per-node info */
+
+	/*
+	 * The BTEs on this node are shared by the local cpus
+	 */
+	struct bteinfo_s	bte_if[MAX_BTES_PER_NODE];	/* Virtual Interface */
+	struct timer_list	bte_recovery_timer;
+	spinlock_t		bte_recovery_lock;
+
+	/* 
+	 * Array of pointers to the nodepdas for each node.
+	 */
+	struct nodepda_s	*pernode_pdaindr[MAX_COMPACT_NODES]; 
+
+	/*
+	 * Array of physical cpu identifiers. Indexed by cpuid.
+	 */
+	struct phys_cpuid	phys_cpuid[NR_CPUS];
+	spinlock_t		ptc_lock ____cacheline_aligned_in_smp;
+};
+
+typedef struct nodepda_s nodepda_t;
+
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use 
+ *
+ *	sn_nodepda   - to access node PDA for the node on which code is running
+ *	NODEPDA(cnodeid)   - to access node PDA for cnodeid
+ */
+
+DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+#define sn_nodepda		__this_cpu_read(__sn_nodepda)
+#define	NODEPDA(cnodeid)	(sn_nodepda->pernode_pdaindr[cnodeid])
+
+/*
+ * Check if given a compact node id the corresponding node has all the
+ * cpus disabled. 
+ */
+#define is_headless_node(cnodeid)	(nr_cpus_node(cnodeid) == 0)
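+
+/*
+ * Illustrative sketch: reaching another node's per-node data through
+ * the pernode_pdaindr[] array described above.  The helper name is
+ * hypothetical.
+ */
+static inline struct bteinfo_s *
+example_node_bte(int cnodeid, int bte)
+{
+	return &NODEPDA(cnodeid)->bte_if[bte];
+}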
+
+#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/arch/ia64/include/asm/sn/pcibr_provider.h b/arch/ia64/include/asm/sn/pcibr_provider.h
new file mode 100644
index 0000000..da205b7
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcibr_provider.h
@@ -0,0 +1,150 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+
+/* Workarounds */
+#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
+
+#define BUSTYPE_MASK                    0x1
+
+/* Macros given a pcibus structure */
+#define IS_PCIX(ps)     ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
+#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
+                asic == PCIIO_ASIC_TYPE_TIOCP)
+#define IS_PIC_SOFT(ps)     (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+#define IS_TIOCP_SOFT(ps)   (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
+
+
+/*
+ * The different PCI Bridge types supported on the SGI Altix platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN       -1
+#define PCIBR_BRIDGETYPE_PIC            2
+#define PCIBR_BRIDGETYPE_TIOCP          3
+
+/*
+ * Bridge 64bit Direct Map Attributes
+ */
+#define PCI64_ATTR_PREF                 (1ull << 59)
+#define PCI64_ATTR_PREC                 (1ull << 58)
+#define PCI64_ATTR_VIRTUAL              (1ull << 57)
+#define PCI64_ATTR_BAR                  (1ull << 56)
+#define PCI64_ATTR_SWAP                 (1ull << 55)
+#define PCI64_ATTR_VIRTUAL1             (1ull << 54)
+
+#define PCI32_LOCAL_BASE                0
+#define PCI32_MAPPED_BASE               0x40000000
+#define PCI32_DIRECT_BASE               0x80000000
+
+#define IS_PCI32_MAPPED(x)              ((u64)(x) < PCI32_DIRECT_BASE && \
+                                         (u64)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x)              ((u64)(x) >= PCI32_MAPPED_BASE)
+
+
+/*
+ * Bridge PMU Address Translation Entry Attributes
+ */
+#define PCI32_ATE_V                     (0x1 << 0)
+#define PCI32_ATE_CO                    (0x1 << 1)	/* PIC ASIC ONLY */
+#define PCI32_ATE_PIO                   (0x1 << 1)	/* TIOCP ASIC ONLY */
+#define PCI32_ATE_MSI                   (0x1 << 2)
+#define PCI32_ATE_PREF                  (0x1 << 3)
+#define PCI32_ATE_BAR                   (0x1 << 4)
+#define PCI32_ATE_ADDR_SHFT             12
+
+#define MINIMAL_ATES_REQUIRED(addr, size) \
+	(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
+
+#define MINIMAL_ATE_FLAG(addr, size) \
+	(MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT                   29
+#define ATE_SWAP_ON(x)                  ((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x)                 ((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/*
+ * I/O page size
+ */
+#if PAGE_SIZE < 16384
+#define IOPFNSHIFT                      12      /* 4K per mapped page */
+#else
+#define IOPFNSHIFT                      14      /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE                        (1 << IOPFNSHIFT)
+#define IOPG(x)                         ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x)                      ((x) & (IOPGSIZE-1))
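+
+/*
+ * Worked example for the ATE page math above, assuming 16K I/O pages
+ * (IOPFNSHIFT == 14): a 0x1000-byte buffer at page offset 0x3800 gives
+ * IOPG(0x3800 + 0xfff) == 1 while IOPG(0xfff) == 0, so
+ * MINIMAL_ATES_REQUIRED() is false and the mapping crosses into a
+ * second page (two ATEs); the same buffer at offset 0x2000 stays in
+ * one page and needs a single ATE.
+ */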
+
+#define PCIBR_DEV_SWAP_DIR              (1ull << 19)
+#define PCIBR_CTRL_PAGE_SIZE            (0x1 << 21)
+
+/*
+ * PMU resources.
+ */
+struct ate_resource{
+	u64 *ate;
+	u64 num_ate;
+	u64 lowest_free_index;
+};
+
+struct pcibus_info {
+	struct pcibus_bussoft	pbi_buscommon;   /* common header */
+	u32                pbi_moduleid;
+	short                   pbi_bridge_type;
+	short                   pbi_bridge_mode;
+
+	struct ate_resource     pbi_int_ate_resource;
+	u64                pbi_int_ate_size;
+
+	u64                pbi_dir_xbase;
+	char                    pbi_hub_xid;
+
+	u64                pbi_devreg[8];
+
+	u32		pbi_valid_devices;
+	u32		pbi_enabled_devices;
+
+	spinlock_t              pbi_lock;
+};
+
+extern int  pcibr_init_provider(void);
+extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern void             pcireg_control_bit_clr(struct pcibus_info *, u64);
+extern void             pcireg_control_bit_set(struct pcibus_info *, u64);
+extern u64         pcireg_tflush_get(struct pcibus_info *);
+extern u64         pcireg_intr_status_get(struct pcibus_info *);
+extern void             pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
+extern void             pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
+extern void             pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
+extern void             pcireg_force_intr_set(struct pcibus_info *, int);
+extern u64         pcireg_wrb_flush_get(struct pcibus_info *, int);
+extern void             pcireg_int_ate_set(struct pcibus_info *, int, u64);
+extern u64 __iomem *	pcireg_int_ate_addr(struct pcibus_info *, int);
+extern void 		pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
+extern void 		pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
+extern int 		pcibr_ate_alloc(struct pcibus_info *, int);
+extern void 		pcibr_ate_free(struct pcibus_info *, int);
+extern void 		ate_write(struct pcibus_info *, int, int, u64);
+extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
+				 void *resp, char **ssdt);
+extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
+				  int action, void *resp);
+extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
+#endif
diff --git a/arch/ia64/include/asm/sn/pcibus_provider_defs.h b/arch/ia64/include/asm/sn/pcibus_provider_defs.h
new file mode 100644
index 0000000..8f7c83d
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcibus_provider_defs.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+
+/*
+ * SN pci asic types.  Do not ever renumber these or reuse values.  The
+ * values must agree with what prom thinks they are.
+ */
+
+#define PCIIO_ASIC_TYPE_UNKNOWN	0
+#define PCIIO_ASIC_TYPE_PPB	1
+#define PCIIO_ASIC_TYPE_PIC	2
+#define PCIIO_ASIC_TYPE_TIOCP	3
+#define PCIIO_ASIC_TYPE_TIOCA	4
+#define PCIIO_ASIC_TYPE_TIOCE	5
+
+#define PCIIO_ASIC_MAX_TYPES	6
+
+/*
+ * Common pciio bus provider data.  There should be one of these as the
+ * first field in any pciio based provider soft structure (e.g. pcibr_soft
+ * tioca_soft, etc).
+ */
+
+struct pcibus_bussoft {
+	u32		bs_asic_type;	/* chipset type */
+	u32		bs_xid;		/* xwidget id */
+	u32		bs_persist_busnum; /* Persistent Bus Number */
+	u32		bs_persist_segment; /* Segment Number */
+	u64		bs_legacy_io;	/* legacy io pio addr */
+	u64		bs_legacy_mem;	/* legacy mem pio addr */
+	u64		bs_base;	/* widget base */
+	struct xwidget_info	*bs_xwidget_info;
+};
+
+struct pci_controller;
+/*
+ * SN pci bus indirection
+ */
+
+struct sn_pcibus_provider {
+	dma_addr_t	(*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+	dma_addr_t	(*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
+	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
+	void *		(*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+ 	void		(*force_interrupt)(struct sn_irq_info *);
+ 	void		(*target_interrupt)(struct sn_irq_info *);
+};
+
+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit  4   specifies that address is to be used for MSI
+ */
+
+#define SN_DMA_ADDRTYPE(x)	((x) & 0xf)
+#define     SN_DMA_ADDR_PHYS	1	/* address is phys memory */
+#define     SN_DMA_ADDR_XIO	2	/* address is an xio address */
+#define SN_DMA_MSI		0x10	/* Bus address is to be used for MSI */
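+
+/*
+ * Illustrative sketch: composing and decoding the flag word described
+ * above, e.g. a physical address that will be used for MSI.  The
+ * helper name is hypothetical.
+ */
+static inline int
+example_msi_flags(void)
+{
+	int flags = SN_DMA_ADDR_PHYS | SN_DMA_MSI;	/* phys format + MSI */
+
+	return SN_DMA_ADDRTYPE(flags) == SN_DMA_ADDR_PHYS &&
+	       (flags & SN_DMA_MSI) != 0;		/* both recoverable */
+}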
+
+extern struct sn_pcibus_provider *sn_pci_provider[];
+#endif				/* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/pcidev.h b/arch/ia64/include/asm/sn/pcidev.h
new file mode 100644
index 0000000..1c2382c
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcidev.h
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
+#define _ASM_IA64_SN_PCI_PCIDEV_H
+
+#include <linux/pci.h>
+
+/*
+ * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
+ * the pcidev_info structs for all devices under a controller, we keep a
+ * list of pcidev_info under pci_controller->platform_data.
+ */
+struct sn_platform_data {
+	void *provider_soft;
+	struct list_head pcidev_info;
+};
+
+#define SN_PLATFORM_DATA(busdev) \
+	((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
+
+#define SN_PCIDEV_INFO(dev)	sn_pcidev_info_get(dev)
+
+/*
+ * Given a pci_bus, return the sn pcibus_bussoft struct.  Note that
+ * this only works for root busses, not for busses represented by PPB's.
+ */
+
+#define SN_PCIBUS_BUSSOFT(pci_bus) \
+	((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
+
+#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
+	((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
+/*
+ * Given a struct pci_dev, return the sn pcibus_bussoft struct.  Note
+ * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
+ * to possible PPB's in the path.
+ */
+
+#define SN_PCIDEV_BUSSOFT(pci_dev) \
+	(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
+
+#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
+	(SN_PCIDEV_INFO(pci_dev)->pdi_provider)
+
+#define PCIIO_BUS_NONE	255      /* bus 255 reserved */
+#define PCIIO_SLOT_NONE 255
+#define PCIIO_FUNC_NONE 255
+#define PCIIO_VENDOR_ID_NONE	(-1)
+
+struct pcidev_info {
+	u64		pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
+	u64		pdi_slot_host_handle;	/* Bus and devfn Host pci_dev */
+
+	struct pcibus_bussoft	*pdi_pcibus_info;	/* Kernel common bus soft */
+	struct pcidev_info	*pdi_host_pcidev_info;	/* Kernel Host pci_dev */
+	struct pci_dev		*pdi_linux_pcidev;	/* Kernel pci_dev */
+
+	struct sn_irq_info	*pdi_sn_irq_info;
+	struct sn_pcibus_provider *pdi_provider;	/* sn pci ops */
+	struct pci_dev 		*host_pci_dev;		/* host bus link */
+	struct list_head	pdi_list;		/* List of pcidev_info */
+};
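+
+/*
+ * Illustrative sketch: reaching a device's bus provider ops via the
+ * macros above and calling its dma_unmap hook.  Assumes
+ * pcibus_provider_defs.h is also included; the helper name is
+ * hypothetical.
+ */
+static inline void
+example_provider_dma_unmap(struct pci_dev *dev, dma_addr_t dma, int dir)
+{
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(dev);
+
+	if (provider && provider->dma_unmap)
+		provider->dma_unmap(dev, dma, dir);
+}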
+
+extern void sn_irq_fixup(struct pci_dev *pci_dev,
+			 struct sn_irq_info *sn_irq_info);
+extern void sn_irq_unfixup(struct pci_dev *pci_dev);
+extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
+extern void sn_bus_fixup(struct pci_bus *);
+extern void sn_acpi_bus_fixup(struct pci_bus *);
+extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
+extern void sn_bus_store_sysdata(struct pci_dev *dev);
+extern void sn_bus_free_sysdata(void);
+extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
+extern void sn_io_slot_fixup(struct pci_dev *);
+extern void sn_acpi_slot_fixup(struct pci_dev *);
+extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
+			      struct sn_irq_info *);
+extern void sn_pci_unfixup_slot(struct pci_dev *dev);
+extern void sn_irq_lh_init(void);
+#endif				/* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/arch/ia64/include/asm/sn/pda.h b/arch/ia64/include/asm/sn/pda.h
new file mode 100644
index 0000000..22ae358
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pda.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PDA_H
+#define _ASM_IA64_SN_PDA_H
+
+#include <linux/cache.h>
+#include <asm/percpu.h>
+
+
+/*
+ * CPU-specific data structure.
+ *
+ * One of these structures is allocated for each cpu of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together 
+ * all SN per-cpu data structures. 
+ */
+
+typedef struct pda_s {
+
+	/*
+	 * Support for SN LEDs
+	 */
+	volatile short	*led_address;
+	u8		led_state;
+	u8		hb_state;	/* supports blinking heartbeat leds */
+	unsigned int	hb_count;
+
+	unsigned int	idle_flag;
+	
+	volatile unsigned long *bedrock_rev_id;
+	volatile unsigned long *pio_write_status_addr;
+	unsigned long pio_write_status_val;
+	volatile unsigned long *pio_shub_war_cam_addr;
+
+	unsigned long	sn_in_service_ivecs[4];
+	int		sn_lb_int_war_ticks;
+	int		sn_last_irq;
+	int		sn_first_irq;
+} pda_t;
+
+
+#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+/*
+ * PDA
+ * Per-cpu private data area for each cpu. The PDA is located immediately after
+ * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
+ * cpu but only a small amount of the page is actually used. We put the SNIA PDA
+ * in the same page as the cpu_data area. Note that there is a check in the setup
+ * code to verify that we don't overflow the page.
+ *
+ * Seems like we should cache-line align the pda so that any changes in the
+ * size of the cpu_data area don't change the cache layout. Should we align to a
+ * 32, 64, 128 or 512 byte boundary? Each has merits. For now, pick 128, but this
+ * should be revisited later.
+ */
+DECLARE_PER_CPU(struct pda_s, pda_percpu);
+
+#define pda		(&__ia64_per_cpu_var(pda_percpu))
+
+#define pdacpu(cpu)	(&per_cpu(pda_percpu, cpu))
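+
+/*
+ * Illustrative sketch: pda reaches the executing cpu's copy, while
+ * pdacpu(cpu) reaches any other cpu's.  The helper name is
+ * hypothetical.
+ */
+static inline int
+example_cpu_idle_flag(int cpu)
+{
+	return pdacpu(cpu)->idle_flag;
+}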
+
+#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/arch/ia64/include/asm/sn/pic.h b/arch/ia64/include/asm/sn/pic.h
new file mode 100644
index 0000000..5f9da5f
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pic.h
@@ -0,0 +1,261 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PIC_H
+#define _ASM_IA64_SN_PCI_PIC_H
+
+/*
+ * PIC AS DEVICE ZERO
+ * ------------------
+ *
+ * PIC handles PCI/X busses.  PCI/X requires that the 'bridge' (i.e. PIC)
+ * be designated as 'device 0'.   That is a departure from earlier SGI
+ * PCI bridges.  Because of that we use config space 1 to access the
+ * config space of the first actual PCI device on the bus.
+ * Here's what the PIC manual says:
+ *
+ *     The current PCI-X bus specification now defines that the parent
+ *     hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC
+ *     reduced the total number of devices from 8 to 4 and removed the
+ *     device registers and windows, now only supporting devices 0,1,2, and
+ *     3. PIC did leave all 8 configuration space windows. The reason was
+ *     there was nothing to gain by removing them. Here in lies the problem.
+ *     The device numbering we do using 0 through 3 is unrelated to the device
+ *     numbering which PCI-X requires in configuration space. In the past we
+ *     correlated Configs pace and our device space 0 <-> 0, 1 <-> 1, etc.
+ *     PCI-X requires we start a 1, not 0 and currently the PX brick
+ *     does associate our:
+ *
+ *         device 0 with configuration space window 1,
+ *         device 1 with configuration space window 2,
+ *         device 2 with configuration space window 3,
+ *         device 3 with configuration space window 4.
+ *
+ * The net effect is that all config space accesses are off by one
+ * relative to other per-slot accesses on the PIC.
+ * Here is a table that shows some of that:
+ *
+ *                               Internal Slot#
+ *           |
+ *           |     0         1        2         3
+ * ----------|---------------------------------------
+ * config    |  0x21000   0x22000  0x23000   0x24000
+ *           |
+ * even rrb  |  0[0]      n/a      1[0]      n/a	[] == implied even/odd
+ *           |
+ * odd rrb   |  n/a       0[1]     n/a       1[1]
+ *           |
+ * int dev   |  00       01        10        11
+ *           |
+ * ext slot# |  1        2         3         4
+ * ----------|---------------------------------------
+ */
+
+#define PIC_ATE_TARGETID_SHFT           8
+#define PIC_HOST_INTR_ADDR              0x0000FFFFFFFFFFFFUL
+#define PIC_PCI64_ATTR_TARG_SHFT        60
+
+
+/*****************************************************************************
+ *********************** PIC MMR structure mapping ***************************
+ *****************************************************************************/
+
+/* NOTE: PIC WAR. PV#854697.  PIC does not allow writes just to [31:0]
+ * of a 64-bit register.  When writing PIC registers, always write the
+ * entire 64 bits.
+ */
+
+struct pic {
+
+    /* 0x000000-0x00FFFF -- Local Registers */
+
+    /* 0x000000-0x000057 -- Standard Widget Configuration */
+    u64		p_wid_id;			/* 0x000000 */
+    u64		p_wid_stat;			/* 0x000008 */
+    u64		p_wid_err_upper;		/* 0x000010 */
+    u64		p_wid_err_lower;		/* 0x000018 */
+    #define p_wid_err p_wid_err_lower
+    u64		p_wid_control;			/* 0x000020 */
+    u64		p_wid_req_timeout;		/* 0x000028 */
+    u64		p_wid_int_upper;		/* 0x000030 */
+    u64		p_wid_int_lower;		/* 0x000038 */
+    #define p_wid_int p_wid_int_lower
+    u64		p_wid_err_cmdword;		/* 0x000040 */
+    u64		p_wid_llp;			/* 0x000048 */
+    u64		p_wid_tflush;			/* 0x000050 */
+
+    /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
+    u64		p_wid_aux_err;			/* 0x000058 */
+    u64		p_wid_resp_upper;		/* 0x000060 */
+    u64		p_wid_resp_lower;		/* 0x000068 */
+    #define p_wid_resp p_wid_resp_lower
+    u64		p_wid_tst_pin_ctrl;		/* 0x000070 */
+    u64		p_wid_addr_lkerr;		/* 0x000078 */
+
+    /* 0x000080-0x00008F -- PMU & MAP */
+    u64		p_dir_map;			/* 0x000080 */
+    u64		_pad_000088;			/* 0x000088 */
+
+    /* 0x000090-0x00009F -- SSRAM */
+    u64		p_map_fault;			/* 0x000090 */
+    u64		_pad_000098;			/* 0x000098 */
+
+    /* 0x0000A0-0x0000AF -- Arbitration */
+    u64		p_arb;				/* 0x0000A0 */
+    u64		_pad_0000A8;			/* 0x0000A8 */
+
+    /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+    u64		p_ate_parity_err;		/* 0x0000B0 */
+    u64		_pad_0000B8;			/* 0x0000B8 */
+
+    /* 0x0000C0-0x0000FF -- PCI/GIO */
+    u64		p_bus_timeout;			/* 0x0000C0 */
+    u64		p_pci_cfg;			/* 0x0000C8 */
+    u64		p_pci_err_upper;		/* 0x0000D0 */
+    u64		p_pci_err_lower;		/* 0x0000D8 */
+    #define p_pci_err p_pci_err_lower
+    u64		_pad_0000E0[4];			/* 0x0000{E0..F8} */
+
+    /* 0x000100-0x0001FF -- Interrupt */
+    u64		p_int_status;			/* 0x000100 */
+    u64		p_int_enable;			/* 0x000108 */
+    u64		p_int_rst_stat;			/* 0x000110 */
+    u64		p_int_mode;			/* 0x000118 */
+    u64		p_int_device;			/* 0x000120 */
+    u64		p_int_host_err;			/* 0x000128 */
+    u64		p_int_addr[8];			/* 0x0001{30,,,68} */
+    u64		p_err_int_view;			/* 0x000170 */
+    u64		p_mult_int;			/* 0x000178 */
+    u64		p_force_always[8];		/* 0x0001{80,,,B8} */
+    u64		p_force_pin[8];			/* 0x0001{C0,,,F8} */
+
+    /* 0x000200-0x000298 -- Device */
+    u64		p_device[4];			/* 0x0002{00,,,18} */
+    u64		_pad_000220[4];			/* 0x0002{20,,,38} */
+    u64		p_wr_req_buf[4];		/* 0x0002{40,,,58} */
+    u64		_pad_000260[4];			/* 0x0002{60,,,78} */
+    u64		p_rrb_map[2];			/* 0x0002{80,,,88} */
+    #define p_even_resp p_rrb_map[0]			/* 0x000280 */
+    #define p_odd_resp  p_rrb_map[1]			/* 0x000288 */
+    u64		p_resp_status;			/* 0x000290 */
+    u64		p_resp_clear;			/* 0x000298 */
+
+    u64		_pad_0002A0[12];		/* 0x0002{A0..F8} */
+
+    /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+    struct {
+	u64	upper;				/* 0x0003{00,,,F0} */
+	u64	lower;				/* 0x0003{08,,,F8} */
+    } p_buf_addr_match[16];
+
+    /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+    struct {
+	u64	flush_w_touch;			/* 0x000{400,,,5C0} */
+	u64	flush_wo_touch;			/* 0x000{408,,,5C8} */
+	u64	inflight;			/* 0x000{410,,,5D0} */
+	u64	prefetch;			/* 0x000{418,,,5D8} */
+	u64	total_pci_retry;		/* 0x000{420,,,5E0} */
+	u64	max_pci_retry;			/* 0x000{428,,,5E8} */
+	u64	max_latency;			/* 0x000{430,,,5F0} */
+	u64	clear_all;			/* 0x000{438,,,5F8} */
+    } p_buf_count[8];
+
+
+    /* 0x000600-0x0009FF -- PCI/X registers */
+    u64		p_pcix_bus_err_addr;		/* 0x000600 */
+    u64		p_pcix_bus_err_attr;		/* 0x000608 */
+    u64		p_pcix_bus_err_data;		/* 0x000610 */
+    u64		p_pcix_pio_split_addr;		/* 0x000618 */
+    u64		p_pcix_pio_split_attr;		/* 0x000620 */
+    u64		p_pcix_dma_req_err_attr;	/* 0x000628 */
+    u64		p_pcix_dma_req_err_addr;	/* 0x000630 */
+    u64		p_pcix_timeout;			/* 0x000638 */
+
+    u64		_pad_000640[120];		/* 0x000{640,,,9F8} */
+
+    /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+    struct {
+	u64	p_buf_addr;			/* 0x000{A00,,,AF0} */
+	u64	p_buf_attr;			/* 0X000{A08,,,AF8} */
+    } p_pcix_read_buf_64[16];
+
+    struct {
+	u64	p_buf_addr;			/* 0x000{B00,,,BE0} */
+	u64	p_buf_attr;			/* 0x000{B08,,,BE8} */
+	u64	p_buf_valid;			/* 0x000{B10,,,BF0} */
+	u64	__pad1;				/* 0x000{B18,,,BF8} */
+    } p_pcix_write_buf_64[8];
+
+    /* End of Local Registers -- Start of Address Map space */
+
+    char		_pad_000c00[0x010000 - 0x000c00];
+
+    /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
+    u64		p_int_ate_ram[1024];		/* 0x010000-0x011fff */
+
+    /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
+    u64		p_int_ate_ram_mp[1024];		/* 0x012000-0x013fff */
+
+    char		_pad_014000[0x18000 - 0x014000];
+
+    /* 0x18000-0x197F8 -- PIC Write Request Ram */
+    u64		p_wr_req_lower[256];		/* 0x18000 - 0x187F8 */
+    u64		p_wr_req_upper[256];		/* 0x18800 - 0x18FF8 */
+    u64		p_wr_req_parity[256];		/* 0x19000 - 0x197F8 */
+
+    char		_pad_019800[0x20000 - 0x019800];
+
+    /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
+    union {
+	u8		c[0x1000 / 1];			/* 0x02{0000,,,7FFF} */
+	u16	s[0x1000 / 2];			/* 0x02{0000,,,7FFF} */
+	u32	l[0x1000 / 4];			/* 0x02{0000,,,7FFF} */
+	u64	d[0x1000 / 8];			/* 0x02{0000,,,7FFF} */
+	union {
+	    u8	c[0x100 / 1];
+	    u16	s[0x100 / 2];
+	    u32	l[0x100 / 4];
+	    u64	d[0x100 / 8];
+	} f[8];
+    } p_type0_cfg_dev[8];				/* 0x02{0000,,,7FFF} */
+
+    /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+    union {
+	u8		c[0x1000 / 1];			/* 0x028000-0x029000 */
+	u16	s[0x1000 / 2];			/* 0x028000-0x029000 */
+	u32	l[0x1000 / 4];			/* 0x028000-0x029000 */
+	u64	d[0x1000 / 8];			/* 0x028000-0x029000 */
+	union {
+	    u8	c[0x100 / 1];
+	    u16	s[0x100 / 2];
+	    u32	l[0x100 / 4];
+	    u64	d[0x100 / 8];
+	} f[8];
+    } p_type1_cfg;					/* 0x028000-0x029000 */
+
+    char		_pad_029000[0x030000-0x029000];
+
+    /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+    union {
+	u8		c[8 / 1];
+	u16	s[8 / 2];
+	u32	l[8 / 4];
+	u64	d[8 / 8];
+    } p_pci_iack;					/* 0x030000-0x030007 */
+
+    char		_pad_030007[0x040000-0x030008];
+
+    /* 0x040000-0x040007 -- PCIX Special Cycle */
+    union {
+	u8		c[8 / 1];
+	u16	s[8 / 2];
+	u32	l[8 / 4];
+	u64	d[8 / 8];
+    } p_pcix_cycle;					/* 0x040000-0x040007 */
+};
+
+#endif                          /* _ASM_IA64_SN_PCI_PIC_H */
diff --git a/arch/ia64/include/asm/sn/rw_mmr.h b/arch/ia64/include/asm/sn/rw_mmr.h
new file mode 100644
index 0000000..2d78f4c
--- /dev/null
+++ b/arch/ia64/include/asm/sn/rw_mmr.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_RW_MMR_H
+#define _ASM_IA64_SN_RW_MMR_H
+
+
+/*
+ * This file declares routines that access MMRs via uncached physical addresses:
+ * 	pio_phys_read_mmr  - read an MMR
+ * 	pio_phys_write_mmr - write an MMR
+ * 	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *		Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ * i.e., 0x80000....
+ */
+
+
+extern long pio_phys_read_mmr(volatile long *mmr); 
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2); 
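+
+/*
+ * Illustrative sketch: a write followed by a read-back through the
+ * accessors above.  The caller supplies an uncached physical MMR
+ * address; the helper name is hypothetical.
+ */
+static inline long
+example_mmr_update(volatile long *mmr, long val)
+{
+	pio_phys_write_mmr(mmr, val);
+	return pio_phys_read_mmr(mmr);
+}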
+
+#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/arch/ia64/include/asm/sn/shub_mmr.h b/arch/ia64/include/asm/sn/shub_mmr.h
new file mode 100644
index 0000000..a84d870
--- /dev/null
+++ b/arch/ia64/include/asm/sn/shub_mmr.h
@@ -0,0 +1,502 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUB_MMR_H
+#define _ASM_IA64_SN_SHUB_MMR_H
+
+/* ==================================================================== */
+/*                        Register "SH_IPI_INT"                         */
+/*               SHub Inter-Processor Interrupt Registers               */
+/* ==================================================================== */
+#define SH1_IPI_INT			__IA64_UL_CONST(0x0000000110000380)
+#define SH2_IPI_INT			__IA64_UL_CONST(0x0000000010000380)
+
+/*   SH_IPI_INT_TYPE                                                    */
+/*   Description:  Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT       */
+#define SH_IPI_INT_TYPE_SHFT				0
+#define SH_IPI_INT_TYPE_MASK		__IA64_UL_CONST(0x0000000000000007)
+
+/*   SH_IPI_INT_AGT                                                     */
+/*   Description:  Agent, must be 0 for SHub                            */
+#define SH_IPI_INT_AGT_SHFT				3
+#define SH_IPI_INT_AGT_MASK		__IA64_UL_CONST(0x0000000000000008)
+
+/*   SH_IPI_INT_PID                                                     */
+/*   Description:  Processor ID, same setting as on targeted McKinley  */
+#define SH_IPI_INT_PID_SHFT                      	4
+#define SH_IPI_INT_PID_MASK		__IA64_UL_CONST(0x00000000000ffff0)
+
+/*   SH_IPI_INT_BASE                                                    */
+/*   Description:  Optional interrupt vector area, 2MB aligned          */
+#define SH_IPI_INT_BASE_SHFT				21
+#define SH_IPI_INT_BASE_MASK 		__IA64_UL_CONST(0x0003ffffffe00000)
+
+/*   SH_IPI_INT_IDX                                                     */
+/*   Description:  Targeted McKinley interrupt vector                   */
+#define SH_IPI_INT_IDX_SHFT				52
+#define SH_IPI_INT_IDX_MASK		__IA64_UL_CONST(0x0ff0000000000000)
+
+/*   SH_IPI_INT_SEND                                                    */
+/*   Description:  Send Interrupt Message to PI; this generates a pulse */
+#define SH_IPI_INT_SEND_SHFT				63
+#define SH_IPI_INT_SEND_MASK		__IA64_UL_CONST(0x8000000000000000)
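+
+/*
+ * Illustrative sketch: assembling an SH_IPI_INT value from the fields
+ * above (type 0 = INT, target processor id, interrupt vector) with the
+ * send bit set.  The helper name is hypothetical.
+ */
+static inline unsigned long
+example_sh_ipi_int_value(unsigned long pid, unsigned long vector)
+{
+	return (0UL << SH_IPI_INT_TYPE_SHFT) |
+	       (pid << SH_IPI_INT_PID_SHFT) |
+	       (vector << SH_IPI_INT_IDX_SHFT) |
+	       SH_IPI_INT_SEND_MASK;
+}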
+
+/* ==================================================================== */
+/*                     Register "SH_EVENT_OCCURRED"                     */
+/*                    SHub Interrupt Event Occurred                     */
+/* ==================================================================== */
+#define SH1_EVENT_OCCURRED		__IA64_UL_CONST(0x0000000110010000)
+#define SH1_EVENT_OCCURRED_ALIAS	__IA64_UL_CONST(0x0000000110010008)
+#define SH2_EVENT_OCCURRED		__IA64_UL_CONST(0x0000000010010000)
+#define SH2_EVENT_OCCURRED_ALIAS 	__IA64_UL_CONST(0x0000000010010008)
+
+/* ==================================================================== */
+/*                     Register "SH_PI_CAM_CONTROL"                     */
+/*                      CRB CAM MMR Access Control                      */
+/* ==================================================================== */
+#define SH1_PI_CAM_CONTROL		__IA64_UL_CONST(0x0000000120050300)
+
+/* ==================================================================== */
+/*                        Register "SH_SHUB_ID"                         */
+/*                            SHub ID Number                            */
+/* ==================================================================== */
+#define SH1_SHUB_ID			__IA64_UL_CONST(0x0000000110060580)
+#define SH1_SHUB_ID_REVISION_SHFT			28
+#define SH1_SHUB_ID_REVISION_MASK	__IA64_UL_CONST(0x00000000f0000000)
+
+/* ==================================================================== */
+/*                          Register "SH_RTC"                           */
+/*                           Real-time Clock                            */
+/* ==================================================================== */
+#define SH1_RTC				__IA64_UL_CONST(0x00000001101c0000)
+#define SH2_RTC				__IA64_UL_CONST(0x00000002101c0000)
+#define SH_RTC_MASK			__IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/*                   Register "SH_PIO_WRITE_STATUS_0|1"                 */
+/*                      PIO Write Status for CPU 0 & 1                  */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0		__IA64_UL_CONST(0x0000000120070200)
+#define SH1_PIO_WRITE_STATUS_1		__IA64_UL_CONST(0x0000000120070280)
+#define SH2_PIO_WRITE_STATUS_0		__IA64_UL_CONST(0x0000000020070200)
+#define SH2_PIO_WRITE_STATUS_1		__IA64_UL_CONST(0x0000000020070280)
+#define SH2_PIO_WRITE_STATUS_2		__IA64_UL_CONST(0x0000000020070300)
+#define SH2_PIO_WRITE_STATUS_3		__IA64_UL_CONST(0x0000000020070380)
+
+/*   SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK                               */
+/*   Description:  Deadlock response detected                           */
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT		1
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
+					__IA64_UL_CONST(0x0000000000000002)
+
+/*   SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT                          */
+/*   Description:  Count of currently pending PIO writes                */
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT	56
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
+					__IA64_UL_CONST(0x3f00000000000000)
+
+/* ==================================================================== */
+/*                Register "SH_PIO_WRITE_STATUS_0_ALIAS"                */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0_ALIAS	__IA64_UL_CONST(0x0000000120070208)
+#define SH2_PIO_WRITE_STATUS_0_ALIAS	__IA64_UL_CONST(0x0000000020070208)
+
+/* ==================================================================== */
+/*                     Register "SH_EVENT_OCCURRED"                     */
+/*                    SHub Interrupt Event Occurred                     */
+/* ==================================================================== */
+/*   SH_EVENT_OCCURRED_UART_INT                                         */
+/*   Description:  Pending Junk Bus UART Interrupt                      */
+#define SH_EVENT_OCCURRED_UART_INT_SHFT			20
+#define SH_EVENT_OCCURRED_UART_INT_MASK	__IA64_UL_CONST(0x0000000000100000)
+
+/*   SH_EVENT_OCCURRED_IPI_INT                                          */
+/*   Description:  Pending IPI Interrupt                                */
+#define SH_EVENT_OCCURRED_IPI_INT_SHFT			28
+#define SH_EVENT_OCCURRED_IPI_INT_MASK	__IA64_UL_CONST(0x0000000010000000)
+
+/*   SH_EVENT_OCCURRED_II_INT0                                          */
+/*   Description:  Pending II 0 Interrupt                               */
+#define SH_EVENT_OCCURRED_II_INT0_SHFT			29
+#define SH_EVENT_OCCURRED_II_INT0_MASK	__IA64_UL_CONST(0x0000000020000000)
+
+/*   SH_EVENT_OCCURRED_II_INT1                                          */
+/*   Description:  Pending II 1 Interrupt                               */
+#define SH_EVENT_OCCURRED_II_INT1_SHFT			30
+#define SH_EVENT_OCCURRED_II_INT1_MASK	__IA64_UL_CONST(0x0000000040000000)
+
+/*   SH2_EVENT_OCCURRED_EXTIO_INT2                                      */
+/*   Description:  Pending SHUB 2 EXT IO INT2                           */
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT		33
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
+
+/*   SH2_EVENT_OCCURRED_EXTIO_INT3                                      */
+/*   Description:  Pending SHUB 2 EXT IO INT3                           */
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT		34
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
+
+#define SH_ALL_INT_MASK \
+	(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
+	 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
+	 SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \
+	 SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
+
+
+/* ==================================================================== */
+/*                         LEDS                                         */
+/* ==================================================================== */
+#define SH1_REAL_JUNK_BUS_LED0			0x7fed00000UL
+#define SH1_REAL_JUNK_BUS_LED1			0x7fed10000UL
+#define SH1_REAL_JUNK_BUS_LED2			0x7fed20000UL
+#define SH1_REAL_JUNK_BUS_LED3			0x7fed30000UL
+
+#define SH2_REAL_JUNK_BUS_LED0			0xf0000000UL
+#define SH2_REAL_JUNK_BUS_LED1			0xf0010000UL
+#define SH2_REAL_JUNK_BUS_LED2			0xf0020000UL
+#define SH2_REAL_JUNK_BUS_LED3			0xf0030000UL
+
+/* ==================================================================== */
+/*                         Register "SH1_PTC_0"                         */
+/*      Purge Translation Cache Message Configuration Information       */
+/* ==================================================================== */
+#define SH1_PTC_0			__IA64_UL_CONST(0x00000001101a0000)
+
+/*   SH1_PTC_0_A                                                        */
+/*   Description:  Type                                                 */
+#define SH1_PTC_0_A_SHFT				0
+
+/*   SH1_PTC_0_PS                                                       */
+/*   Description:  Page Size                                            */
+#define SH1_PTC_0_PS_SHFT				2
+
+/*   SH1_PTC_0_RID                                                      */
+/*   Description:  Region ID                                            */
+#define SH1_PTC_0_RID_SHFT				8
+
+/*   SH1_PTC_0_START                                                    */
+/*   Description:  Start                                                */
+#define SH1_PTC_0_START_SHFT				63
+
+/* ==================================================================== */
+/*                         Register "SH1_PTC_1"                         */
+/*      Purge Translation Cache Message Configuration Information       */
+/* ==================================================================== */
+#define SH1_PTC_1			__IA64_UL_CONST(0x00000001101a0080)
+
+/*   SH1_PTC_1_START                                                    */
+/*   Description:  PTC_1 Start                                          */
+#define SH1_PTC_1_START_SHFT				63
+
+/* ==================================================================== */
+/*                         Register "SH2_PTC"                           */
+/*      Purge Translation Cache Message Configuration Information       */
+/* ==================================================================== */
+#define SH2_PTC				__IA64_UL_CONST(0x0000000170000000)
+
+/*   SH2_PTC_A                                                          */
+/*   Description:  Type                                                 */
+#define SH2_PTC_A_SHFT					0
+
+/*   SH2_PTC_PS                                                         */
+/*   Description:  Page Size                                            */
+#define SH2_PTC_PS_SHFT					2
+
+/*   SH2_PTC_RID                                                        */
+/*   Description:  Region ID                                            */
+#define SH2_PTC_RID_SHFT				4
+
+/*   SH2_PTC_START                                                      */
+/*   Description:  Start                                                */
+#define SH2_PTC_START_SHFT				63
+
+/*   SH2_PTC_ADDR                                                       */
+/*   Description:  Address                                              */
+#define SH2_PTC_ADDR_SHFT				4
+#define SH2_PTC_ADDR_MASK		__IA64_UL_CONST(0x1ffffffffffff000)
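+
+/*
+ * Editor-added illustrative sketch: composing a SHUB1 TLB-purge
+ * request word from the SH1_PTC_0 fields above.  The MMR write itself
+ * and the address operand for SH1_PTC_1 are elided; this only shows
+ * how the SHFT values combine into one value.  "rid" and "page_size"
+ * are hypothetical parameters.
+ */
+static inline unsigned long sh1_ptc0_val(unsigned long rid,
+					 unsigned long page_size)
+{
+	return (rid << SH1_PTC_0_RID_SHFT) |
+	       (page_size << SH1_PTC_0_PS_SHFT) |
+	       (__IA64_UL_CONST(1) << SH1_PTC_0_START_SHFT);
+}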
+
+/* ==================================================================== */
+/*                    Register "SH_RTC1_INT_CONFIG"                     */
+/*                SHub RTC 1 Interrupt Config Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_CONFIG		__IA64_UL_CONST(0x0000000110001480)
+#define SH2_RTC1_INT_CONFIG		__IA64_UL_CONST(0x0000000010001480)
+#define SH_RTC1_INT_CONFIG_MASK		__IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC1_INT_CONFIG_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC1_INT_CONFIG_TYPE                                            */
+/*   Description:  Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT       */
+#define SH_RTC1_INT_CONFIG_TYPE_SHFT			0
+#define SH_RTC1_INT_CONFIG_TYPE_MASK	__IA64_UL_CONST(0x0000000000000007)
+
+/*   SH_RTC1_INT_CONFIG_AGT                                             */
+/*   Description:  Agent, must be 0 for SHub                            */
+#define SH_RTC1_INT_CONFIG_AGT_SHFT			3
+#define SH_RTC1_INT_CONFIG_AGT_MASK	__IA64_UL_CONST(0x0000000000000008)
+
+/*   SH_RTC1_INT_CONFIG_PID                                             */
+/*   Description:  Processor ID, same setting as on targeted McKinley  */
+#define SH_RTC1_INT_CONFIG_PID_SHFT			4
+#define SH_RTC1_INT_CONFIG_PID_MASK	__IA64_UL_CONST(0x00000000000ffff0)
+
+/*   SH_RTC1_INT_CONFIG_BASE                                            */
+/*   Description:  Optional interrupt vector area, 2MB aligned          */
+#define SH_RTC1_INT_CONFIG_BASE_SHFT			21
+#define SH_RTC1_INT_CONFIG_BASE_MASK	__IA64_UL_CONST(0x0003ffffffe00000)
+
+/*   SH_RTC1_INT_CONFIG_IDX                                             */
+/*   Description:  Targeted McKinley interrupt vector                   */
+#define SH_RTC1_INT_CONFIG_IDX_SHFT			52
+#define SH_RTC1_INT_CONFIG_IDX_MASK	__IA64_UL_CONST(0x0ff0000000000000)
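+
+/*
+ * Editor-added illustrative sketch: building an RTC 1 interrupt
+ * configuration word from the fields above.  "pid" and "vector" are
+ * hypothetical parameters; TYPE stays 0 (=INT) and AGT stays 0, as
+ * the field descriptions require for SHub.
+ */
+static inline unsigned long sh_rtc1_int_config_val(unsigned long pid,
+						   unsigned long vector)
+{
+	return ((pid << SH_RTC1_INT_CONFIG_PID_SHFT) &
+		SH_RTC1_INT_CONFIG_PID_MASK) |
+	       ((vector << SH_RTC1_INT_CONFIG_IDX_SHFT) &
+		SH_RTC1_INT_CONFIG_IDX_MASK);
+}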
+
+/* ==================================================================== */
+/*                    Register "SH_RTC1_INT_ENABLE"                     */
+/*                SHub RTC 1 Interrupt Enable Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_ENABLE		__IA64_UL_CONST(0x0000000110001500)
+#define SH2_RTC1_INT_ENABLE		__IA64_UL_CONST(0x0000000010001500)
+#define SH_RTC1_INT_ENABLE_MASK		__IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC1_INT_ENABLE_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC1_INT_ENABLE_RTC1_ENABLE                                     */
+/*   Description:  Enable RTC 1 Interrupt                               */
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT		0
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
+					__IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/*                    Register "SH_RTC2_INT_CONFIG"                     */
+/*                SHub RTC 2 Interrupt Config Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_CONFIG		__IA64_UL_CONST(0x0000000110001580)
+#define SH2_RTC2_INT_CONFIG		__IA64_UL_CONST(0x0000000010001580)
+#define SH_RTC2_INT_CONFIG_MASK		__IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC2_INT_CONFIG_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC2_INT_CONFIG_TYPE                                            */
+/*   Description:  Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT       */
+#define SH_RTC2_INT_CONFIG_TYPE_SHFT			0
+#define SH_RTC2_INT_CONFIG_TYPE_MASK	__IA64_UL_CONST(0x0000000000000007)
+
+/*   SH_RTC2_INT_CONFIG_AGT                                             */
+/*   Description:  Agent, must be 0 for SHub                            */
+#define SH_RTC2_INT_CONFIG_AGT_SHFT			3
+#define SH_RTC2_INT_CONFIG_AGT_MASK	__IA64_UL_CONST(0x0000000000000008)
+
+/*   SH_RTC2_INT_CONFIG_PID                                             */
+/*   Description:  Processor ID, same setting as on targeted McKinley  */
+#define SH_RTC2_INT_CONFIG_PID_SHFT			4
+#define SH_RTC2_INT_CONFIG_PID_MASK	__IA64_UL_CONST(0x00000000000ffff0)
+
+/*   SH_RTC2_INT_CONFIG_BASE                                            */
+/*   Description:  Optional interrupt vector area, 2MB aligned          */
+#define SH_RTC2_INT_CONFIG_BASE_SHFT			21
+#define SH_RTC2_INT_CONFIG_BASE_MASK	__IA64_UL_CONST(0x0003ffffffe00000)
+
+/*   SH_RTC2_INT_CONFIG_IDX                                             */
+/*   Description:  Targeted McKinley interrupt vector                   */
+#define SH_RTC2_INT_CONFIG_IDX_SHFT			52
+#define SH_RTC2_INT_CONFIG_IDX_MASK	__IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/*                    Register "SH_RTC2_INT_ENABLE"                     */
+/*                SHub RTC 2 Interrupt Enable Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_ENABLE		__IA64_UL_CONST(0x0000000110001600)
+#define SH2_RTC2_INT_ENABLE		__IA64_UL_CONST(0x0000000010001600)
+#define SH_RTC2_INT_ENABLE_MASK		__IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC2_INT_ENABLE_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC2_INT_ENABLE_RTC2_ENABLE                                     */
+/*   Description:  Enable RTC 2 Interrupt                               */
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT		0
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
+					__IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/*                    Register "SH_RTC3_INT_CONFIG"                     */
+/*                SHub RTC 3 Interrupt Config Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_CONFIG		__IA64_UL_CONST(0x0000000110001680)
+#define SH2_RTC3_INT_CONFIG		__IA64_UL_CONST(0x0000000010001680)
+#define SH_RTC3_INT_CONFIG_MASK		__IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC3_INT_CONFIG_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC3_INT_CONFIG_TYPE                                            */
+/*   Description:  Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT       */
+#define SH_RTC3_INT_CONFIG_TYPE_SHFT			0
+#define SH_RTC3_INT_CONFIG_TYPE_MASK	__IA64_UL_CONST(0x0000000000000007)
+
+/*   SH_RTC3_INT_CONFIG_AGT                                             */
+/*   Description:  Agent, must be 0 for SHub                            */
+#define SH_RTC3_INT_CONFIG_AGT_SHFT			3
+#define SH_RTC3_INT_CONFIG_AGT_MASK	__IA64_UL_CONST(0x0000000000000008)
+
+/*   SH_RTC3_INT_CONFIG_PID                                             */
+/*   Description:  Processor ID, same setting as on targeted McKinley  */
+#define SH_RTC3_INT_CONFIG_PID_SHFT			4
+#define SH_RTC3_INT_CONFIG_PID_MASK	__IA64_UL_CONST(0x00000000000ffff0)
+
+/*   SH_RTC3_INT_CONFIG_BASE                                            */
+/*   Description:  Optional interrupt vector area, 2MB aligned          */
+#define SH_RTC3_INT_CONFIG_BASE_SHFT			21
+#define SH_RTC3_INT_CONFIG_BASE_MASK	__IA64_UL_CONST(0x0003ffffffe00000)
+
+/*   SH_RTC3_INT_CONFIG_IDX                                             */
+/*   Description:  Targeted McKinley interrupt vector                   */
+#define SH_RTC3_INT_CONFIG_IDX_SHFT			52
+#define SH_RTC3_INT_CONFIG_IDX_MASK	__IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/*                    Register "SH_RTC3_INT_ENABLE"                     */
+/*                SHub RTC 3 Interrupt Enable Registers                 */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_ENABLE		__IA64_UL_CONST(0x0000000110001700)
+#define SH2_RTC3_INT_ENABLE		__IA64_UL_CONST(0x0000000010001700)
+#define SH_RTC3_INT_ENABLE_MASK		__IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC3_INT_ENABLE_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_RTC3_INT_ENABLE_RTC3_ENABLE                                     */
+/*   Description:  Enable RTC 3 Interrupt                               */
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT		0
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
+					__IA64_UL_CONST(0x0000000000000001)
+
+/*   SH_EVENT_OCCURRED_RTC1_INT                                         */
+/*   Description:  Pending RTC 1 Interrupt                              */
+#define SH_EVENT_OCCURRED_RTC1_INT_SHFT			24
+#define SH_EVENT_OCCURRED_RTC1_INT_MASK	__IA64_UL_CONST(0x0000000001000000)
+
+/*   SH_EVENT_OCCURRED_RTC2_INT                                         */
+/*   Description:  Pending RTC 2 Interrupt                              */
+#define SH_EVENT_OCCURRED_RTC2_INT_SHFT			25
+#define SH_EVENT_OCCURRED_RTC2_INT_MASK	__IA64_UL_CONST(0x0000000002000000)
+
+/*   SH_EVENT_OCCURRED_RTC3_INT                                         */
+/*   Description:  Pending RTC 3 Interrupt                              */
+#define SH_EVENT_OCCURRED_RTC3_INT_SHFT			26
+#define SH_EVENT_OCCURRED_RTC3_INT_MASK	__IA64_UL_CONST(0x0000000004000000)
+
+/* ==================================================================== */
+/*                       Register "SH_IPI_ACCESS"                       */
+/*                 CPU interrupt Access Permission Bits                 */
+/* ==================================================================== */
+
+#define SH1_IPI_ACCESS			__IA64_UL_CONST(0x0000000110060480)
+#define SH2_IPI_ACCESS0			__IA64_UL_CONST(0x0000000010060c00)
+#define SH2_IPI_ACCESS1			__IA64_UL_CONST(0x0000000010060c80)
+#define SH2_IPI_ACCESS2			__IA64_UL_CONST(0x0000000010060d00)
+#define SH2_IPI_ACCESS3			__IA64_UL_CONST(0x0000000010060d80)
+
+/* ==================================================================== */
+/*                        Register "SH_INT_CMPB"                        */
+/*                  RTC Compare Value for Processor B                   */
+/* ==================================================================== */
+
+#define SH1_INT_CMPB			__IA64_UL_CONST(0x00000001101b0080)
+#define SH2_INT_CMPB			__IA64_UL_CONST(0x00000000101b0080)
+#define SH_INT_CMPB_MASK		__IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPB_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_INT_CMPB_REAL_TIME_CMPB                                         */
+/*   Description:  Real Time Clock Compare                              */
+#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT			0
+#define SH_INT_CMPB_REAL_TIME_CMPB_MASK	__IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/*                        Register "SH_INT_CMPC"                        */
+/*                  RTC Compare Value for Processor C                   */
+/* ==================================================================== */
+
+#define SH1_INT_CMPC			__IA64_UL_CONST(0x00000001101b0100)
+#define SH2_INT_CMPC			__IA64_UL_CONST(0x00000000101b0100)
+#define SH_INT_CMPC_MASK		__IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPC_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_INT_CMPC_REAL_TIME_CMPC                                         */
+/*   Description:  Real Time Clock Compare                              */
+#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT			0
+#define SH_INT_CMPC_REAL_TIME_CMPC_MASK	__IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/*                        Register "SH_INT_CMPD"                        */
+/*                  RTC Compare Value for Processor D                   */
+/* ==================================================================== */
+
+#define SH1_INT_CMPD			__IA64_UL_CONST(0x00000001101b0180)
+#define SH2_INT_CMPD			__IA64_UL_CONST(0x00000000101b0180)
+#define SH_INT_CMPD_MASK		__IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPD_INIT		__IA64_UL_CONST(0x0000000000000000)
+
+/*   SH_INT_CMPD_REAL_TIME_CMPD                                         */
+/*   Description:  Real Time Clock Compare                              */
+#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT			0
+#define SH_INT_CMPD_REAL_TIME_CMPD_MASK	__IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/*                Register "SH_MD_DQLP_MMR_DIR_PRIVEC0"                 */
+/*                      privilege vector for acc=0                      */
+/* ==================================================================== */
+#define SH1_MD_DQLP_MMR_DIR_PRIVEC0	__IA64_UL_CONST(0x0000000100030300)
+
+/* ==================================================================== */
+/*                Register "SH_MD_DQRP_MMR_DIR_PRIVEC0"                 */
+/*                      privilege vector for acc=0                      */
+/* ==================================================================== */
+#define SH1_MD_DQRP_MMR_DIR_PRIVEC0	__IA64_UL_CONST(0x0000000100050300)
+
+/* ==================================================================== */
+/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
+/* and SHUB2 that it makes sense to define a generic name for the MMR.  */
+/* It is acceptable to use (for example) SH_IPI_INT to reference the    */
+/* IPI MMR; the value of SH_IPI_INT is determined at runtime based on   */
+/* the type of the SHUB. Do not use these #defines in performance-      */
+/* critical code or loops - there is a small performance penalty. See   */
+/* the usage sketch after the list below.                               */
+/* ==================================================================== */
+#define shubmmr(a,b) 		(is_shub2() ? a##2_##b : a##1_##b)
+
+#define SH_REAL_JUNK_BUS_LED0	shubmmr(SH, REAL_JUNK_BUS_LED0)
+#define SH_IPI_INT		shubmmr(SH, IPI_INT)
+#define SH_EVENT_OCCURRED	shubmmr(SH, EVENT_OCCURRED)
+#define SH_EVENT_OCCURRED_ALIAS	shubmmr(SH, EVENT_OCCURRED_ALIAS)
+#define SH_RTC			shubmmr(SH, RTC)
+#define SH_RTC1_INT_CONFIG	shubmmr(SH, RTC1_INT_CONFIG)
+#define SH_RTC1_INT_ENABLE	shubmmr(SH, RTC1_INT_ENABLE)
+#define SH_RTC2_INT_CONFIG	shubmmr(SH, RTC2_INT_CONFIG)
+#define SH_RTC2_INT_ENABLE	shubmmr(SH, RTC2_INT_ENABLE)
+#define SH_RTC3_INT_CONFIG	shubmmr(SH, RTC3_INT_CONFIG)
+#define SH_RTC3_INT_ENABLE	shubmmr(SH, RTC3_INT_ENABLE)
+#define SH_INT_CMPB		shubmmr(SH, INT_CMPB)
+#define SH_INT_CMPC		shubmmr(SH, INT_CMPC)
+#define SH_INT_CMPD		shubmmr(SH, INT_CMPD)
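+
+/*
+ * Editor-added usage sketch for the shubmmr() names above, e.g.:
+ *
+ *	unsigned long mmr = SH_RTC1_INT_ENABLE;
+ *	// expands to (is_shub2() ? SH2_RTC1_INT_ENABLE
+ *	//			  : SH1_RTC1_INT_ENABLE)
+ *
+ * is_shub2() is assumed to come from the SN headers that include this
+ * file; the SHUB1/SHUB2 selection happens at runtime, per the note
+ * above.
+ */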
+
+/* ========================================================================== */
+/*                        Register "SH2_BT_ENG_CSR_0"                         */
+/*                    Engine 0 Control and Status Register                    */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_0		__IA64_UL_CONST(0x0000000030040000)
+#define SH2_BT_ENG_SRC_ADDR_0		__IA64_UL_CONST(0x0000000030040080)
+#define SH2_BT_ENG_DEST_ADDR_0		__IA64_UL_CONST(0x0000000030040100)
+#define SH2_BT_ENG_NOTIF_ADDR_0		__IA64_UL_CONST(0x0000000030040180)
+
+/* ========================================================================== */
+/*                       BTE interfaces 1-3                                   */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_1		__IA64_UL_CONST(0x0000000030050000)
+#define SH2_BT_ENG_CSR_2		__IA64_UL_CONST(0x0000000030060000)
+#define SH2_BT_ENG_CSR_3		__IA64_UL_CONST(0x0000000030070000)
+
+#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
new file mode 100644
index 0000000..ecb8a49
--- /dev/null
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -0,0 +1,3358 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUBIO_H
+#define _ASM_IA64_SN_SHUBIO_H
+
+#define HUB_WIDGET_ID_MAX	0xf
+#define IIO_NUM_ITTES		7
+#define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)
+
+#define		IIO_WID			0x00400000	/* Crosstalk Widget Identification */
+							/* This register is also accessible from
+							 * Crosstalk at address 0x0.  */
+#define		IIO_WSTAT		0x00400008	/* Crosstalk Widget Status */
+#define		IIO_WCR			0x00400020	/* Crosstalk Widget Control Register */
+#define		IIO_ILAPR		0x00400100	/* IO Local Access Protection Register */
+#define		IIO_ILAPO		0x00400108	/* IO Local Access Protection Override */
+#define		IIO_IOWA		0x00400110	/* IO Outbound Widget Access */
+#define		IIO_IIWA		0x00400118	/* IO Inbound Widget Access */
+#define		IIO_IIDEM		0x00400120	/* IO Inbound Device Error Mask */
+#define		IIO_ILCSR		0x00400128	/* IO LLP Control and Status Register */
+#define		IIO_ILLR		0x00400130	/* IO LLP Log Register    */
+#define		IIO_IIDSR		0x00400138	/* IO Interrupt Destination */
+
+#define		IIO_IGFX0		0x00400140	/* IO Graphics Node-Widget Map 0 */
+#define		IIO_IGFX1		0x00400148	/* IO Graphics Node-Widget Map 1 */
+
+#define		IIO_ISCR0		0x00400150	/* IO Scratch Register 0 */
+#define		IIO_ISCR1		0x00400158	/* IO Scratch Register 1 */
+
+#define		IIO_ITTE1		0x00400160	/* IO Translation Table Entry 1 */
+#define		IIO_ITTE2		0x00400168	/* IO Translation Table Entry 2 */
+#define		IIO_ITTE3		0x00400170	/* IO Translation Table Entry 3 */
+#define		IIO_ITTE4		0x00400178	/* IO Translation Table Entry 4 */
+#define		IIO_ITTE5		0x00400180	/* IO Translation Table Entry 5 */
+#define		IIO_ITTE6		0x00400188	/* IO Translation Table Entry 6 */
+#define		IIO_ITTE7		0x00400190	/* IO Translation Table Entry 7 */
+
+#define		IIO_IPRB0		0x00400198	/* IO PRB Entry 0   */
+#define		IIO_IPRB8		0x004001A0	/* IO PRB Entry 8   */
+#define		IIO_IPRB9		0x004001A8	/* IO PRB Entry 9   */
+#define		IIO_IPRBA		0x004001B0	/* IO PRB Entry A   */
+#define		IIO_IPRBB		0x004001B8	/* IO PRB Entry B   */
+#define		IIO_IPRBC		0x004001C0	/* IO PRB Entry C   */
+#define		IIO_IPRBD		0x004001C8	/* IO PRB Entry D   */
+#define		IIO_IPRBE		0x004001D0	/* IO PRB Entry E   */
+#define		IIO_IPRBF		0x004001D8	/* IO PRB Entry F   */
+
+#define		IIO_IXCC		0x004001E0	/* IO Crosstalk Credit Count Timeout */
+#define		IIO_IMEM		0x004001E8	/* IO Miscellaneous Error Mask */
+#define		IIO_IXTT		0x004001F0	/* IO Crosstalk Timeout Threshold */
+#define		IIO_IECLR		0x004001F8	/* IO Error Clear Register */
+#define		IIO_IBCR		0x00400200	/* IO BTE Control Register */
+
+#define		IIO_IXSM		0x00400208	/* IO Crosstalk Spurious Message */
+#define		IIO_IXSS		0x00400210	/* IO Crosstalk Spurious Sideband */
+
+#define		IIO_ILCT		0x00400218	/* IO LLP Channel Test    */
+
+#define		IIO_IIEPH1 		0x00400220	/* IO Incoming Error Packet Header, Part 1 */
+#define		IIO_IIEPH2 		0x00400228	/* IO Incoming Error Packet Header, Part 2 */
+
+#define		IIO_ISLAPR 		0x00400230	/* IO SXB Local Access Protection Register */
+#define		IIO_ISLAPO 		0x00400238	/* IO SXB Local Access Protection Override */
+
+#define		IIO_IWI			0x00400240	/* IO Wrapper Interrupt Register */
+#define		IIO_IWEL		0x00400248	/* IO Wrapper Error Log Register */
+#define		IIO_IWC			0x00400250	/* IO Wrapper Control Register */
+#define		IIO_IWS			0x00400258	/* IO Wrapper Status Register */
+#define		IIO_IWEIM		0x00400260	/* IO Wrapper Error Interrupt Masking Register */
+
+#define		IIO_IPCA		0x00400300	/* IO PRB Counter Adjust */
+
+#define		IIO_IPRTE0_A		0x00400308	/* IO PIO Read Address Table Entry 0, Part A */
+#define		IIO_IPRTE1_A		0x00400310	/* IO PIO Read Address Table Entry 1, Part A */
+#define		IIO_IPRTE2_A		0x00400318	/* IO PIO Read Address Table Entry 2, Part A */
+#define		IIO_IPRTE3_A		0x00400320	/* IO PIO Read Address Table Entry 3, Part A */
+#define		IIO_IPRTE4_A		0x00400328	/* IO PIO Read Address Table Entry 4, Part A */
+#define		IIO_IPRTE5_A		0x00400330	/* IO PIO Read Address Table Entry 5, Part A */
+#define		IIO_IPRTE6_A		0x00400338	/* IO PIO Read Address Table Entry 6, Part A */
+#define		IIO_IPRTE7_A		0x00400340	/* IO PIO Read Address Table Entry 7, Part A */
+
+#define		IIO_IPRTE0_B		0x00400348	/* IO PIO Read Address Table Entry 0, Part B */
+#define		IIO_IPRTE1_B		0x00400350	/* IO PIO Read Address Table Entry 1, Part B */
+#define		IIO_IPRTE2_B		0x00400358	/* IO PIO Read Address Table Entry 2, Part B */
+#define		IIO_IPRTE3_B		0x00400360	/* IO PIO Read Address Table Entry 3, Part B */
+#define		IIO_IPRTE4_B		0x00400368	/* IO PIO Read Address Table Entry 4, Part B */
+#define		IIO_IPRTE5_B		0x00400370	/* IO PIO Read Address Table Entry 5, Part B */
+#define		IIO_IPRTE6_B		0x00400378	/* IO PIO Read Address Table Entry 6, Part B */
+#define		IIO_IPRTE7_B		0x00400380	/* IO PIO Read Address Table Entry 7, Part B */
+
+#define		IIO_IPDR		0x00400388	/* IO PIO Deallocation Register */
+#define		IIO_ICDR		0x00400390	/* IO CRB Entry Deallocation Register */
+#define		IIO_IFDR		0x00400398	/* IO IOQ FIFO Depth Register */
+#define		IIO_IIAP		0x004003A0	/* IO IIQ Arbitration Parameters */
+#define		IIO_ICMR		0x004003A8	/* IO CRB Management Register */
+#define		IIO_ICCR		0x004003B0	/* IO CRB Control Register */
+#define		IIO_ICTO		0x004003B8	/* IO CRB Timeout   */
+#define		IIO_ICTP		0x004003C0	/* IO CRB Timeout Prescaler */
+
+#define		IIO_ICRB0_A		0x00400400	/* IO CRB Entry 0_A */
+#define		IIO_ICRB0_B		0x00400408	/* IO CRB Entry 0_B */
+#define		IIO_ICRB0_C		0x00400410	/* IO CRB Entry 0_C */
+#define		IIO_ICRB0_D		0x00400418	/* IO CRB Entry 0_D */
+#define		IIO_ICRB0_E		0x00400420	/* IO CRB Entry 0_E */
+
+#define		IIO_ICRB1_A		0x00400430	/* IO CRB Entry 1_A */
+#define		IIO_ICRB1_B		0x00400438	/* IO CRB Entry 1_B */
+#define		IIO_ICRB1_C		0x00400440	/* IO CRB Entry 1_C */
+#define		IIO_ICRB1_D		0x00400448	/* IO CRB Entry 1_D */
+#define		IIO_ICRB1_E		0x00400450	/* IO CRB Entry 1_E */
+
+#define		IIO_ICRB2_A		0x00400460	/* IO CRB Entry 2_A */
+#define		IIO_ICRB2_B		0x00400468	/* IO CRB Entry 2_B */
+#define		IIO_ICRB2_C		0x00400470	/* IO CRB Entry 2_C */
+#define		IIO_ICRB2_D		0x00400478	/* IO CRB Entry 2_D */
+#define		IIO_ICRB2_E		0x00400480	/* IO CRB Entry 2_E */
+
+#define		IIO_ICRB3_A		0x00400490	/* IO CRB Entry 3_A */
+#define		IIO_ICRB3_B		0x00400498	/* IO CRB Entry 3_B */
+#define		IIO_ICRB3_C		0x004004a0	/* IO CRB Entry 3_C */
+#define		IIO_ICRB3_D		0x004004a8	/* IO CRB Entry 3_D */
+#define		IIO_ICRB3_E		0x004004b0	/* IO CRB Entry 3_E */
+
+#define		IIO_ICRB4_A		0x004004c0	/* IO CRB Entry 4_A */
+#define		IIO_ICRB4_B		0x004004c8	/* IO CRB Entry 4_B */
+#define		IIO_ICRB4_C		0x004004d0	/* IO CRB Entry 4_C */
+#define		IIO_ICRB4_D		0x004004d8	/* IO CRB Entry 4_D */
+#define		IIO_ICRB4_E		0x004004e0	/* IO CRB Entry 4_E */
+
+#define		IIO_ICRB5_A		0x004004f0	/* IO CRB Entry 5_A */
+#define		IIO_ICRB5_B		0x004004f8	/* IO CRB Entry 5_B */
+#define		IIO_ICRB5_C		0x00400500	/* IO CRB Entry 5_C */
+#define		IIO_ICRB5_D		0x00400508	/* IO CRB Entry 5_D */
+#define		IIO_ICRB5_E		0x00400510	/* IO CRB Entry 5_E */
+
+#define		IIO_ICRB6_A		0x00400520	/* IO CRB Entry 6_A */
+#define		IIO_ICRB6_B		0x00400528	/* IO CRB Entry 6_B */
+#define		IIO_ICRB6_C		0x00400530	/* IO CRB Entry 6_C */
+#define		IIO_ICRB6_D		0x00400538	/* IO CRB Entry 6_D */
+#define		IIO_ICRB6_E		0x00400540	/* IO CRB Entry 6_E */
+
+#define		IIO_ICRB7_A		0x00400550	/* IO CRB Entry 7_A */
+#define		IIO_ICRB7_B		0x00400558	/* IO CRB Entry 7_B */
+#define		IIO_ICRB7_C		0x00400560	/* IO CRB Entry 7_C */
+#define		IIO_ICRB7_D		0x00400568	/* IO CRB Entry 7_D */
+#define		IIO_ICRB7_E		0x00400570	/* IO CRB Entry 7_E */
+
+#define		IIO_ICRB8_A		0x00400580	/* IO CRB Entry 8_A */
+#define		IIO_ICRB8_B		0x00400588	/* IO CRB Entry 8_B */
+#define		IIO_ICRB8_C		0x00400590	/* IO CRB Entry 8_C */
+#define		IIO_ICRB8_D		0x00400598	/* IO CRB Entry 8_D */
+#define		IIO_ICRB8_E		0x004005a0	/* IO CRB Entry 8_E */
+
+#define		IIO_ICRB9_A		0x004005b0	/* IO CRB Entry 9_A */
+#define		IIO_ICRB9_B		0x004005b8	/* IO CRB Entry 9_B */
+#define		IIO_ICRB9_C		0x004005c0	/* IO CRB Entry 9_C */
+#define		IIO_ICRB9_D		0x004005c8	/* IO CRB Entry 9_D */
+#define		IIO_ICRB9_E		0x004005d0	/* IO CRB Entry 9_E */
+
+#define		IIO_ICRBA_A		0x004005e0	/* IO CRB Entry A_A */
+#define		IIO_ICRBA_B		0x004005e8	/* IO CRB Entry A_B */
+#define		IIO_ICRBA_C		0x004005f0	/* IO CRB Entry A_C */
+#define		IIO_ICRBA_D		0x004005f8	/* IO CRB Entry A_D */
+#define		IIO_ICRBA_E		0x00400600	/* IO CRB Entry A_E */
+
+#define		IIO_ICRBB_A		0x00400610	/* IO CRB Entry B_A */
+#define		IIO_ICRBB_B		0x00400618	/* IO CRB Entry B_B */
+#define		IIO_ICRBB_C		0x00400620	/* IO CRB Entry B_C */
+#define		IIO_ICRBB_D		0x00400628	/* IO CRB Entry B_D */
+#define		IIO_ICRBB_E		0x00400630	/* IO CRB Entry B_E */
+
+#define		IIO_ICRBC_A		0x00400640	/* IO CRB Entry C_A */
+#define		IIO_ICRBC_B		0x00400648	/* IO CRB Entry C_B */
+#define		IIO_ICRBC_C		0x00400650	/* IO CRB Entry C_C */
+#define		IIO_ICRBC_D		0x00400658	/* IO CRB Entry C_D */
+#define		IIO_ICRBC_E		0x00400660	/* IO CRB Entry C_E */
+
+#define		IIO_ICRBD_A		0x00400670	/* IO CRB Entry D_A */
+#define		IIO_ICRBD_B		0x00400678	/* IO CRB Entry D_B */
+#define		IIO_ICRBD_C		0x00400680	/* IO CRB Entry D_C */
+#define		IIO_ICRBD_D		0x00400688	/* IO CRB Entry D_D */
+#define		IIO_ICRBD_E		0x00400690	/* IO CRB Entry D_E */
+
+#define		IIO_ICRBE_A		0x004006a0	/* IO CRB Entry E_A */
+#define		IIO_ICRBE_B		0x004006a8	/* IO CRB Entry E_B */
+#define		IIO_ICRBE_C		0x004006b0	/* IO CRB Entry E_C */
+#define		IIO_ICRBE_D		0x004006b8	/* IO CRB Entry E_D */
+#define		IIO_ICRBE_E		0x004006c0	/* IO CRB Entry E_E */
+
+#define		IIO_ICSML		0x00400700	/* IO CRB Spurious Message Low */
+#define		IIO_ICSMM		0x00400708	/* IO CRB Spurious Message Middle */
+#define		IIO_ICSMH		0x00400710	/* IO CRB Spurious Message High */
+
+#define		IIO_IDBSS		0x00400718	/* IO Debug Submenu Select */
+
+#define		IIO_IBLS0		0x00410000	/* IO BTE Length Status 0 */
+#define		IIO_IBSA0		0x00410008	/* IO BTE Source Address 0 */
+#define		IIO_IBDA0		0x00410010	/* IO BTE Destination Address 0 */
+#define		IIO_IBCT0		0x00410018	/* IO BTE Control Terminate 0 */
+#define		IIO_IBNA0		0x00410020	/* IO BTE Notification Address 0 */
+#define		IIO_IBIA0		0x00410028	/* IO BTE Interrupt Address 0 */
+#define		IIO_IBLS1		0x00420000	/* IO BTE Length Status 1 */
+#define		IIO_IBSA1		0x00420008	/* IO BTE Source Address 1 */
+#define		IIO_IBDA1		0x00420010	/* IO BTE Destination Address 1 */
+#define		IIO_IBCT1		0x00420018	/* IO BTE Control Terminate 1 */
+#define		IIO_IBNA1		0x00420020	/* IO BTE Notification Address 1 */
+#define		IIO_IBIA1		0x00420028	/* IO BTE Interrupt Address 1 */
+
+#define		IIO_IPCR		0x00430000	/* IO Performance Control */
+#define		IIO_IPPR		0x00430008	/* IO Performance Profiling */
+
+/************************************************************************
+ *									*
+ * Description:  This register echoes some information from the         *
+ * LB_REV_ID register. It is available through Crosstalk as described   *
+ * above. The REV_NUM and MFG_NUM fields receive their values from      *
+ * the REVISION and MANUFACTURER fields in the LB_REV_ID register.      *
+ * The PART_NUM field's value is the Crosstalk device ID number that    *
+ * Steve Miller assigned to the SHub chip.                              *
+ *									*
+ ************************************************************************/
+
+typedef union ii_wid_u {
+	u64 ii_wid_regval;
+	struct {
+		u64 w_rsvd_1:1;
+		u64 w_mfg_num:11;
+		u64 w_part_num:16;
+		u64 w_rev_num:4;
+		u64 w_rsvd:32;
+	} ii_wid_fld_s;
+} ii_wid_u_t;
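+
+/*
+ * Editor-added illustrative sketch: every MMR union below follows the
+ * same pattern -- load the raw 64-bit value into the *_regval member,
+ * then pick fields out through the bitfield struct.  For example,
+ * decoding the Crosstalk part number from a raw WID value:
+ */
+static inline u64 ii_wid_part_num(u64 raw)
+{
+	ii_wid_u_t wid;
+
+	wid.ii_wid_regval = raw;		/* raw MMR contents */
+	return wid.ii_wid_fld_s.w_part_num;	/* 16-bit PART_NUM  */
+}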
+
+/************************************************************************
+ *									*
+ *  The fields in this register are set upon detection of an error      *
+ * and cleared by various mechanisms, as explained in the               *
+ * description.                                                         *
+ *									*
+ ************************************************************************/
+
+typedef union ii_wstat_u {
+	u64 ii_wstat_regval;
+	struct {
+		u64 w_pending:4;
+		u64 w_xt_crd_to:1;
+		u64 w_xt_tail_to:1;
+		u64 w_rsvd_3:3;
+		u64 w_tx_mx_rty:1;
+		u64 w_rsvd_2:6;
+		u64 w_llp_tx_cnt:8;
+		u64 w_rsvd_1:8;
+		u64 w_crazy:1;
+		u64 w_rsvd:31;
+	} ii_wstat_fld_s;
+} ii_wstat_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This is a read-write enabled register. It controls     *
+ * various aspects of the Crosstalk flow control.                       *
+ *									*
+ ************************************************************************/
+
+typedef union ii_wcr_u {
+	u64 ii_wcr_regval;
+	struct {
+		u64 w_wid:4;
+		u64 w_tag:1;
+		u64 w_rsvd_1:8;
+		u64 w_dst_crd:3;
+		u64 w_f_bad_pkt:1;
+		u64 w_dir_con:1;
+		u64 w_e_thresh:5;
+		u64 w_rsvd:41;
+	} ii_wcr_fld_s;
+} ii_wcr_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register's value is a bit vector that guards      *
+ * access to local registers within the II as well as to external       *
+ * Crosstalk widgets. Each bit in the register corresponds to a         *
+ * particular region in the system; a region consists of one, two or    *
+ * four nodes (depending on the value of the REGION_SIZE field in the   *
+ * LB_REV_ID register, which is documented in Section 8.3.1.1). The     *
+ * protection provided by this register applies to PIO read             *
+ * operations as well as PIO write operations. The II will perform a    *
+ * PIO read or write request only if the bit for the requestor's        *
+ * region is set; otherwise, the II will not perform the requested      *
+ * operation and will return an error response. When a PIO read or      *
+ * write request targets an external Crosstalk widget, then not only    *
+ * must the bit for the requestor's region be set in the ILAPR, but     *
+ * also the target widget's bit in the IOWA register must be set in     *
+ * order for the II to perform the requested operation; otherwise,      *
+ * the II will return an error response. Hence, the protection          *
+ * provided by the IOWA register supplements the protection provided    *
+ * by the ILAPR for requests that target external Crosstalk widgets.    *
+ * This register itself can be accessed only by the nodes whose         *
+ * region ID bits are enabled in this same register. It can also be     *
+ * accessed through the IAlias space by the local processors.           *
+ * The reset value of this register allows access by all nodes.         *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ilapr_u {
+	u64 ii_ilapr_regval;
+	struct {
+		u64 i_region:64;
+	} ii_ilapr_fld_s;
+} ii_ilapr_u_t;
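+
+/*
+ * Editor-added illustrative sketch: granting PIO access to one more
+ * region means setting that region's bit in the ILAPR value.  The
+ * node-to-region mapping (REGION_SIZE in LB_REV_ID) is outside this
+ * header, so "region" is assumed to be an already-computed bit index.
+ */
+static inline u64 ii_ilapr_allow_region(u64 ilapr, unsigned int region)
+{
+	return ilapr | (1UL << region);
+}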
+
+/************************************************************************
+ *									*
+ * Description:  A write to this register of the 64-bit value           *
+ * "SGIrules" in ASCII, will cause the bit in the ILAPR register        *
+ * corresponding to the region of the requestor to be set (allow        *
+ * access). A write of any other value will be ignored. Access          *
+ * protection for this register is "SGIrules".                          *
+ * This register can also be accessed through the IAlias space.         *
+ * However, this access will not change the access permissions in the   *
+ * ILAPR.                                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ilapo_u {
+	u64 ii_ilapo_regval;
+	struct {
+		u64 i_io_ovrride:64;
+	} ii_ilapo_fld_s;
+} ii_ilapo_u_t;
+
+/************************************************************************
+ *									*
+ *  This register qualifies all the PIO and Graphics writes launched    *
+ * from the SHUB towards a widget.                                      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iowa_u {
+	u64 ii_iowa_regval;
+	struct {
+		u64 i_w0_oac:1;
+		u64 i_rsvd_1:7;
+		u64 i_wx_oac:8;
+		u64 i_rsvd:48;
+	} ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register qualifies all the requests launched      *
+ * from a widget towards the Shub. This register is intended to be      *
+ * used by software in case of misbehaving widgets.                     *
+ *									*
+ *									*
+ ************************************************************************/
+
+typedef union ii_iiwa_u {
+	u64 ii_iiwa_regval;
+	struct {
+		u64 i_w0_iac:1;
+		u64 i_rsvd_1:7;
+		u64 i_wx_iac:8;
+		u64 i_rsvd:48;
+	} ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register qualifies all the operations launched    *
+ * from a widget towards the SHub. It allows individual access          *
+ * control for up to 8 devices per widget. A device refers to an        *
+ * individual DMA master hosted by a widget.                            *
+ * The bits in each field of this register are cleared by the Shub      *
+ * upon detection of an error which requires the device to be           *
+ * disabled. These fields assume 0 <= TNUM <= 7 (i.e., Bridge-centric   *
+ * Crosstalk). Whether or not a device has access rights to this        *
+ * Shub is determined by an AND of the device enable bit in the         *
+ * appropriate field of this register and the corresponding bit in      *
+ * the Wx_IAC field (for the widget which this device belongs to).      *
+ * The bits in this field are set by writing a 1 to them. Incoming      *
+ * replies from Crosstalk are not subject to this access control        *
+ * mechanism.                                                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iidem_u {
+	u64 ii_iidem_regval;
+	struct {
+		u64 i_w8_dxs:8;
+		u64 i_w9_dxs:8;
+		u64 i_wa_dxs:8;
+		u64 i_wb_dxs:8;
+		u64 i_wc_dxs:8;
+		u64 i_wd_dxs:8;
+		u64 i_we_dxs:8;
+		u64 i_wf_dxs:8;
+	} ii_iidem_fld_s;
+} ii_iidem_u_t;
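+
+/*
+ * Editor-added illustrative sketch of the access rule described
+ * above: a device may access the Shub only if its enable bit in the
+ * IIDEM field for its widget is set AND that widget's bit in the
+ * IIWA Wx_IAC field is set.  Widgets are numbered 8..0xf, devices
+ * (TNUMs) 0..7; both register values are assumed already read.
+ */
+static inline int ii_device_has_access(ii_iidem_u_t iidem, ii_iiwa_u_t iiwa,
+				       unsigned int widget, unsigned int dev)
+{
+	u64 dem = iidem.ii_iidem_regval >> ((widget - 8) * 8);
+
+	return ((dem >> dev) & 1) &&
+	       ((iiwa.ii_iiwa_fld_s.i_wx_iac >> (widget - 8)) & 1);
+}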
+
+/************************************************************************
+ *									*
+ *  This register contains the various programmable fields necessary    *
+ * for controlling and observing the LLP signals.                       *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ilcsr_u {
+	u64 ii_ilcsr_regval;
+	struct {
+		u64 i_nullto:6;
+		u64 i_rsvd_4:2;
+		u64 i_wrmrst:1;
+		u64 i_rsvd_3:1;
+		u64 i_llp_en:1;
+		u64 i_bm8:1;
+		u64 i_llp_stat:2;
+		u64 i_remote_power:1;
+		u64 i_rsvd_2:1;
+		u64 i_maxrtry:10;
+		u64 i_d_avail_sel:2;
+		u64 i_rsvd_1:4;
+		u64 i_maxbrst:10;
+		u64 i_rsvd:22;
+	} ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+/************************************************************************
+ *									*
+ *  This is simply a status register that monitors the LLP error        *
+ * rate.								*
+ *									*
+ ************************************************************************/
+
+typedef union ii_illr_u {
+	u64 ii_illr_regval;
+	struct {
+		u64 i_sn_cnt:16;
+		u64 i_cb_cnt:16;
+		u64 i_rsvd:32;
+	} ii_illr_fld_s;
+} ii_illr_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  All II-detected non-BTE error interrupts are           *
+ * specified via this register.                                         *
+ * NOTE: The PI interrupt register address is hardcoded in the II. If   *
+ * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI      *
+ * packet) to address offset 0x0180_0090 within the local register      *
+ * address space of PI0 on the node specified by the NODE field. If     *
+ * PI_ID==1, then the II sends the interrupt request to address         *
+ * offset 0x01A0_0090 within the local register address space of PI1    *
+ * on the node specified by the NODE field.                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iidsr_u {
+	u64 ii_iidsr_regval;
+	struct {
+		u64 i_level:8;
+		u64 i_pi_id:1;
+		u64 i_node:11;
+		u64 i_rsvd_3:4;
+		u64 i_enable:1;
+		u64 i_rsvd_2:3;
+		u64 i_int_sent:2;
+		u64 i_rsvd_1:2;
+		u64 i_pi0_forward_int:1;
+		u64 i_pi1_forward_int:1;
+		u64 i_rsvd:30;
+	} ii_iidsr_fld_s;
+} ii_iidsr_u_t;
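+
+/*
+ * Editor-added illustrative sketch: aiming II error interrupts at a
+ * PI on a given node, per the description above.  "level", "pi_id"
+ * and "node" are hypothetical parameters; ENABLE is set so the
+ * interrupt is actually delivered.
+ */
+static inline u64 ii_iidsr_val(u64 level, u64 pi_id, u64 node)
+{
+	ii_iidsr_u_t iidsr;
+
+	iidsr.ii_iidsr_regval = 0;
+	iidsr.ii_iidsr_fld_s.i_level = level;	/* interrupt level  */
+	iidsr.ii_iidsr_fld_s.i_pi_id = pi_id;	/* PI0 or PI1       */
+	iidsr.ii_iidsr_fld_s.i_node = node;	/* destination node */
+	iidsr.ii_iidsr_fld_s.i_enable = 1;
+	return iidsr.ii_iidsr_regval;
+}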
+
+/************************************************************************
+ *									*
+ *  There are two instances of this register. This register is used     *
+ * for matching up the incoming responses from the graphics widget to   *
+ * the processor that initiated the graphics operation. The             *
+ * write-responses are converted to graphics credits and returned to    *
+ * the processor so that the processor interface can manage the flow    *
+ * control.                                                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_igfx0_u {
+	u64 ii_igfx0_regval;
+	struct {
+		u64 i_w_num:4;
+		u64 i_pi_id:1;
+		u64 i_n_num:12;
+		u64 i_p_num:1;
+		u64 i_rsvd:46;
+	} ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+/************************************************************************
+ *									*
+ *  There are two instances of this register. This register is used     *
+ * for matching up the incoming responses from the graphics widget to   *
+ * the processor that initiated the graphics operation. The             *
+ * write-responses are converted to graphics credits and returned to    *
+ * the processor so that the processor interface can manage the flow    *
+ * control.                                                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_igfx1_u {
+	u64 ii_igfx1_regval;
+	struct {
+		u64 i_w_num:4;
+		u64 i_pi_id:1;
+		u64 i_n_num:12;
+		u64 i_p_num:1;
+		u64 i_rsvd:46;
+	} ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+/************************************************************************
+ *									*
+ *  There are two instances of this register. These registers are       *
+ * used as scratch registers for software use.                          *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iscr0_u {
+	u64 ii_iscr0_regval;
+	struct {
+		u64 i_scratch:64;
+	} ii_iscr0_fld_s;
+} ii_iscr0_u_t;
+
+/************************************************************************
+ *									*
+ *  There are two instances of this register. These registers are       *
+ * used as scratch registers for software use.                          *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iscr1_u {
+	u64 ii_iscr1_regval;
+	struct {
+		u64 i_scratch:64;
+	} ii_iscr1_fld_s;
+} ii_iscr1_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Shub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the SHub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Shub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte1_u {
+	u64 ii_itte1_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte1_fld_s;
+} ii_itte1_u_t;
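+
+/*
+ * Editor-added illustrative sketch: programming a Big Window entry as
+ * the description above lays out -- 5 bits of Crosstalk offset, the
+ * target widget number, and the IOSP bit that becomes Crosstalk[47]
+ * in N-mode.  The MMR write itself is elided.
+ */
+static inline u64 ii_itte_val(u64 offset, u64 w_num, u64 iosp)
+{
+	ii_itte1_u_t itte;
+
+	itte.ii_itte1_regval = 0;
+	itte.ii_itte1_fld_s.i_offset = offset;	/* upper Crosstalk addr bits */
+	itte.ii_itte1_fld_s.i_w_num = w_num;	/* target widget             */
+	itte.ii_itte1_fld_s.i_iosp = iosp;	/* Crosstalk[47] in N-mode   */
+	return itte.ii_itte1_regval;
+}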
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Shub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Shub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Shub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte2_u {
+	u64 ii_itte2_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Shub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Shub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the SHub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte3_u {
+	u64 ii_itte3_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a SHub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the SHub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the SHub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte4_u {
+	u64 ii_itte4_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a SHub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Shub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Shub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte5_u {
+	u64 ii_itte5_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Shub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Shub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the Shub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte6_u {
+	u64 ii_itte6_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are seven instances of translation table entry   *
+ * registers. Each register maps a Shub Big Window to a 48-bit          *
+ * address on Crosstalk.                                                *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window      *
+ * number) are used to select one of these 7 registers. The Widget      *
+ * number field is then derived from the W_NUM field for synthesizing   *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with       *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34]      *
+ * are padded with zeros. Although the maximum Crosstalk space          *
+ * addressable by the Shub is thus the lower 16 GBytes per widget       *
+ * (M-mode), only 7/32nds of this space can be accessed.                *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big         *
+ * Window number) are used to select one of these 7 registers. The      *
+ * Widget number field is then derived from the W_NUM field for         *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are            *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP      *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk       *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum   *
+ * Crosstalk space addressable by the SHub is thus the lower            *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be      *
+ * accessed.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_itte7_u {
+	u64 ii_itte7_regval;
+	struct {
+		u64 i_offset:5;
+		u64 i_rsvd_1:3;
+		u64 i_w_num:4;
+		u64 i_iosp:1;
+		u64 i_rsvd:51;
+	} ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprb0_u {
+	u64 ii_iprb0_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprb0_fld_s;
+} ii_iprb0_u_t;
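+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the SPUR_WR recovery sequence described above.  SPUR_WR persists
+ * until IPRBx itself is rewritten, which both corrects the C field
+ * and re-captures it in the internal register.  nasid_t, the
+ * REMOTE_HUB_L()/REMOTE_HUB_S() accessors and the IIO_IPRB0 offset
+ * are assumed to come from the surrounding SN2 support code.
+ */
+static inline void
+ii_iprb0_clear_spur_wr(nasid_t nasid, u64 credits)
+{
+	ii_iprb0_u_t iprb;
+
+	iprb.ii_iprb0_regval = REMOTE_HUB_L(nasid, IIO_IPRB0);
+	iprb.ii_iprb0_fld_s.i_c = credits;	/* rewrite the credit count */
+	REMOTE_HUB_S(nasid, IIO_IPRB0, iprb.ii_iprb0_regval);
+}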
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprb8_u {
+	u64 ii_iprb8_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprb9_u {
+	u64 ii_iprb9_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprba_u {
+	u64 ii_iprba_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprbb_u {
+	u64 ii_iprbb_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprbc_u {
+	u64 ii_iprbc_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprbd_u {
+	u64 ii_iprbd_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprbe_u {
+	u64 ii_iprbe_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 9 instances of this register, one per        *
+ * actual widget in this implementation of SHub and Crossbow.           *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0      *
+ * refers to Crossbow's internal space.                                 *
+ * This register contains the state elements per widget that are        *
+ * necessary to manage the PIO flow control on Crosstalk and on the     *
+ * Router Network. See the PIO Flow Control chapter for a complete      *
+ * description of this register.                                        *
+ * The SPUR_WR bit requires some explanation. When this register is     *
+ * written, the new value of the C field is captured in an internal     *
+ * register so the hardware can remember what the programmer wrote      *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field   *
+ * increments above this stored value, which indicates that there       *
+ * have been more responses received than requests sent. The SPUR_WR    *
+ * bit cannot be cleared until a value is written to the IPRBx          *
+ * register; the write will correct the C field and capture its new     *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the   *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written.           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprbf_u {
+	u64 ii_iprbf_regval;
+	struct {
+		u64 i_c:8;
+		u64 i_na:14;
+		u64 i_rsvd_2:2;
+		u64 i_nb:14;
+		u64 i_rsvd_1:2;
+		u64 i_m:2;
+		u64 i_f:1;
+		u64 i_of_cnt:5;
+		u64 i_error:1;
+		u64 i_rd_to:1;
+		u64 i_spur_wr:1;
+		u64 i_spur_rd:1;
+		u64 i_rsvd:11;
+		u64 i_mult_err:1;
+	} ii_iprbf_fld_s;
+} ii_iprbf_u_t;
+
+/************************************************************************
+ *									*
+ *  This register specifies the timeout value to use for monitoring     *
+ * Crosstalk credits which are used outbound to Crosstalk. An           *
+ * internal counter called the Crosstalk Credit Timeout Counter         *
+ * increments every 128 II clocks. The counter starts counting          *
+ * anytime the credit count drops below a threshold, and resets to      *
+ * zero (stops counting) anytime the credit count is at or above the    *
+ * threshold. The threshold is 1 credit in direct connect mode and 2    *
+ * in Crossbow connect mode. When the internal Crosstalk Credit         *
+ * Timeout Counter reaches the value programmed in this register, a     *
+ * Crosstalk Credit Timeout has occurred. The internal counter is not   *
+ * readable from software, and stops counting at its maximum value,     *
+ * so it cannot cause more than one interrupt.                          *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ixcc_u {
+	u64 ii_ixcc_regval;
+	struct {
+		u64 i_time_out:26;
+		u64 i_rsvd:38;
+	} ii_ixcc_fld_s;
+} ii_ixcc_u_t;
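+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * converting the IXCC TIME_OUT field into II clocks.  The internal
+ * Crosstalk Credit Timeout Counter increments once every 128 II
+ * clocks, so the programmed value corresponds to roughly
+ * TIME_OUT * 128 II clocks spent below the credit threshold.  The
+ * helper name is hypothetical.
+ */
+static inline u64
+ii_ixcc_timeout_clocks(ii_ixcc_u_t ixcc)
+{
+	return (u64) ixcc.ii_ixcc_fld_s.i_time_out * 128;
+}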
+
+/************************************************************************
+ *									*
+ * Description:  This register qualifies all the PIO and DMA            *
+ * operations launched from widget 0 towards the SHub. In               *
+ * addition, it also qualifies accesses by the BTE streams.             *
+ * The bits in each field of this register are cleared by the SHub      *
+ * upon detection of an error which requires widget 0 or the BTE        *
+ * streams to be terminated. Whether or not widget x has access         *
+ * rights to this SHub is determined by an AND of the device            *
+ * enable bit in the appropriate field of this register and bit 0 in    *
+ * the Wx_IAC field. The bits in this field are set by writing a 1 to   *
+ * them. Incoming replies from Crosstalk are not subject to this        *
+ * access control mechanism.                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_imem_u {
+	u64 ii_imem_regval;
+	struct {
+		u64 i_w0_esd:1;
+		u64 i_rsvd_3:3;
+		u64 i_b0_esd:1;
+		u64 i_rsvd_2:3;
+		u64 i_b1_esd:1;
+		u64 i_rsvd_1:3;
+		u64 i_clr_precise:1;
+		u64 i_rsvd:51;
+	} ii_imem_fld_s;
+} ii_imem_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register specifies the timeout value to use for   *
+ * monitoring Crosstalk tail flits coming into the Shub in the          *
+ * TAIL_TO field. An internal counter associated with this register     *
+ * is incremented every 128 II internal clocks (7 bits). The counter    *
+ * starts counting anytime a header micropacket is received and stops   *
+ * counting (and resets to zero) any time a micropacket with a Tail     *
+ * bit is received. Once the counter reaches the threshold value        *
+ * programmed in this register, it generates an interrupt to the        *
+ * processor that is programmed into the IIDSR. The counter saturates   *
+ * (does not roll over) at its maximum value, so it cannot cause        *
+ * another interrupt until after it is cleared.                         *
+ * The register also contains the Read Response Timeout values. The     *
+ * Prescalar is 23 bits, and counts II clocks. An internal counter      *
+ * increments on every II clock and when it reaches the value in the    *
+ * Prescalar field, all IPRTE registers with their valid bits set       *
+ * have their Read Response timers bumped. Whenever any of them match   *
+ * the value in the RRSP_TO field, a Read Response Timeout has          *
+ * occurred, and error handling occurs as described in the Error        *
+ * Handling section of this document.                                   *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ixtt_u {
+	u64 ii_ixtt_regval;
+	struct {
+		u64 i_tail_to:26;
+		u64 i_rsvd_1:6;
+		u64 i_rrsp_ps:23;
+		u64 i_rrsp_to:5;
+		u64 i_rsvd:4;
+	} ii_ixtt_fld_s;
+} ii_ixtt_u_t;
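+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the effective Read Response timeout implied by the fields above.
+ * Every RRSP_PS II clocks the valid IPRTE timers are bumped, and a
+ * timer matching RRSP_TO signals a Read Response Timeout, i.e.
+ * roughly RRSP_PS * RRSP_TO II clocks in total.  The helper name is
+ * hypothetical.
+ */
+static inline u64
+ii_ixtt_rrsp_timeout_clocks(ii_ixtt_u_t ixtt)
+{
+	return (u64) ixtt.ii_ixtt_fld_s.i_rrsp_ps *
+		ixtt.ii_ixtt_fld_s.i_rrsp_to;
+}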
+
+/************************************************************************
+ *									*
+ *  Writing a 1 to the fields of this register clears the appropriate   *
+ * error bits in other areas of SHub. Note that when the                *
+ * E_PRB_x bits are used to clear error bits in PRB registers,          *
+ * SPUR_RD and SPUR_WR may persist, because they require additional     *
+ * action to clear them. See the IPRBx and IXSS Register                *
+ * specifications.                                                      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ieclr_u {
+	u64 ii_ieclr_regval;
+	struct {
+		u64 i_e_prb_0:1;
+		u64 i_rsvd:7;
+		u64 i_e_prb_8:1;
+		u64 i_e_prb_9:1;
+		u64 i_e_prb_a:1;
+		u64 i_e_prb_b:1;
+		u64 i_e_prb_c:1;
+		u64 i_e_prb_d:1;
+		u64 i_e_prb_e:1;
+		u64 i_e_prb_f:1;
+		u64 i_e_crazy:1;
+		u64 i_e_bte_0:1;
+		u64 i_e_bte_1:1;
+		u64 i_reserved_1:10;
+		u64 i_spur_rd_hdr:1;
+		u64 i_cam_intr_to:1;
+		u64 i_cam_overflow:1;
+		u64 i_cam_read_miss:1;
+		u64 i_ioq_rep_underflow:1;
+		u64 i_ioq_req_underflow:1;
+		u64 i_ioq_rep_overflow:1;
+		u64 i_ioq_req_overflow:1;
+		u64 i_iiq_rep_overflow:1;
+		u64 i_iiq_req_overflow:1;
+		u64 i_ii_xn_rep_cred_overflow:1;
+		u64 i_ii_xn_req_cred_overflow:1;
+		u64 i_ii_xn_invalid_cmd:1;
+		u64 i_xn_ii_invalid_cmd:1;
+		u64 i_reserved_2:21;
+	} ii_ieclr_fld_s;
+} ii_ieclr_u_t;
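+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * IECLR is written with 1s in the fields whose error state should be
+ * cleared, as described above.  nasid_t, REMOTE_HUB_S() and the
+ * IIO_IECLR offset are assumed to come from the surrounding SN2
+ * support code.
+ */
+static inline void
+ii_ieclr_clear_prb8(nasid_t nasid)
+{
+	ii_ieclr_u_t ieclr;
+
+	ieclr.ii_ieclr_regval = 0;
+	ieclr.ii_ieclr_fld_s.i_e_prb_8 = 1;	/* clear widget 8 PRB errors */
+	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
+}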
+
+/************************************************************************
+ *									*
+ *  This register controls both BTEs. SOFT_RESET is intended for        *
+ * recovery after an error. COUNT controls the total number of CRBs     *
+ * that both BTEs (combined) can use, which affects total BTE           *
+ * bandwidth.                                                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibcr_u {
+	u64 ii_ibcr_regval;
+	struct {
+		u64 i_count:4;
+		u64 i_rsvd_1:4;
+		u64 i_soft_reset:1;
+		u64 i_rsvd:55;
+	} ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the header of a spurious read response       *
+ * received from Crosstalk. A spurious read response is defined as a    *
+ * read response received by II from a widget for which (1) the SIDN    *
+ * has a value between 1 and 7, inclusive (II never sends requests to   *
+ * these widgets), (2) there is no valid IPRTE register which           *
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is      *
+ * not the same as the widget recorded in the IPRTE register            *
+ * referenced by the TNUM. If this condition is true, and if the        *
+ * IXSS[VALID] bit is clear, then the header of the spurious read       *
+ * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The   *
+ * errant header is thereby captured, and no further spurious read      *
+ * responses are captured until IXSS[VALID] is cleared by setting the   *
+ * appropriate bit in IECLR. Every time a spurious read response is     *
+ * detected, the SPUR_RD bit of the PRB corresponding to the incoming   *
+ * message's SIDN field is set. This always happens, regardless of      *
+ * whether a header is captured. The programmer should check            *
+ * IXSM[SIDN] to determine which widget sent the spurious response,     *
+ * because there may be more than one SPUR_RD bit set in the PRB        *
+ * registers. The widget indicated by IXSM[SIDN] was the first          *
+ * spurious read response to be received since the last time            *
+ * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB      *
+ * will be set. Any SPUR_RD bits in any other PRB registers indicate    *
+ * spurious messages from other widgets which were detected after the   *
+ * header was captured.                                                 *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ixsm_u {
+	u64 ii_ixsm_regval;
+	struct {
+		u64 i_byte_en:32;
+		u64 i_reserved:1;
+		u64 i_tag:3;
+		u64 i_alt_pactyp:4;
+		u64 i_bo:1;
+		u64 i_error:1;
+		u64 i_vbpm:1;
+		u64 i_gbr:1;
+		u64 i_ds:2;
+		u64 i_ct:1;
+		u64 i_tnum:5;
+		u64 i_pactyp:4;
+		u64 i_sidn:4;
+		u64 i_didn:4;
+	} ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the sideband bits of a spurious read         *
+ * response received from Crosstalk.                                    *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ixss_u {
+	u64 ii_ixss_regval;
+	struct {
+		u64 i_sideband:8;
+		u64 i_rsvd:55;
+		u64 i_valid:1;
+	} ii_ixss_fld_s;
+} ii_ixss_u_t;
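+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the spurious read response inspection sequence described for
+ * IXSM/IXSS.  It checks IXSS[VALID], reports the offending widget
+ * from IXSM[SIDN], then re-arms capture through IECLR[SPUR_RD_HDR].
+ * nasid_t, the REMOTE_HUB_L()/REMOTE_HUB_S() accessors and the IIO_*
+ * offsets are assumed to come from the surrounding SN2 support code.
+ */
+static inline int
+ii_check_spurious_read(nasid_t nasid)
+{
+	ii_ixss_u_t ixss;
+	ii_ixsm_u_t ixsm;
+	ii_ieclr_u_t ieclr;
+
+	ixss.ii_ixss_regval = REMOTE_HUB_L(nasid, IIO_IXSS);
+	if (!ixss.ii_ixss_fld_s.i_valid)
+		return -1;			/* nothing captured */
+
+	ixsm.ii_ixsm_regval = REMOTE_HUB_L(nasid, IIO_IXSM);
+
+	ieclr.ii_ieclr_regval = 0;
+	ieclr.ii_ieclr_fld_s.i_spur_rd_hdr = 1;	/* re-arm header capture */
+	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
+
+	return ixsm.ii_ixsm_fld_s.i_sidn;	/* first offending widget */
+}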
+
+/************************************************************************
+ *									*
+ *  This register enables software to access the II LLP's test port.    *
+ * Refer to the LLP 2.5 documentation for an explanation of the test    *
+ * port. Software can write to this register to program the values      *
+ * for the control fields (TestErrCapture, TestClear, TestFlit,         *
+ * TestMask and TestSeed). Similarly, software can read from this       *
+ * register to obtain the values of the test port's status outputs      *
+ * (TestCBerr, TestValid and TestData).                                 *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ilct_u {
+	u64 ii_ilct_regval;
+	struct {
+		u64 i_test_seed:20;
+		u64 i_test_mask:8;
+		u64 i_test_data:20;
+		u64 i_test_valid:1;
+		u64 i_test_cberr:1;
+		u64 i_test_flit:3;
+		u64 i_test_clear:1;
+		u64 i_test_err_capture:1;
+		u64 i_rsvd:9;
+	} ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+/************************************************************************
+ *									*
+ *  If the II detects an illegal incoming Duplonet packet (request or   *
+ * reply) when VALID==0 in the IIEPH1 register, then it saves the       *
+ * contents of the packet's header flit in the IIEPH1 and IIEPH2        *
+ * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit,     *
+ * and assigns a value to the ERR_TYPE field which indicates the        *
+ * specific nature of the error. The II recognizes four different       *
+ * types of errors: short request packets (ERR_TYPE==2), short reply    *
+ * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long   *
+ * reply packets (ERR_TYPE==5). The encodings for these types of        *
+ * errors were chosen to be consistent with the same types of errors    *
+ * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in    *
+ * the LB unit). If the II detects an illegal incoming Duplonet         *
+ * packet when VALID==1 in the IIEPH1 register, then it merely sets     *
+ * the OVERRUN bit to indicate that a subsequent error has happened,    *
+ * and does nothing further.                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iieph1_u {
+	u64 ii_iieph1_regval;
+	struct {
+		u64 i_command:7;
+		u64 i_rsvd_5:1;
+		u64 i_suppl:14;
+		u64 i_rsvd_4:1;
+		u64 i_source:14;
+		u64 i_rsvd_3:1;
+		u64 i_err_type:4;
+		u64 i_rsvd_2:4;
+		u64 i_overrun:1;
+		u64 i_rsvd_1:3;
+		u64 i_valid:1;
+		u64 i_rsvd:13;
+	} ii_iieph1_fld_s;
+} ii_iieph1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register holds the Address field from the header flit of an    *
+ * incoming erroneous Duplonet packet, along with the tail bit which    *
+ * accompanied this header flit. This register is essentially an        *
+ * extension of IIEPH1. Two registers were necessary because the 64     *
+ * bits available in only a single register were insufficient to        *
+ * capture the entire header flit of an erroneous packet.               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iieph2_u {
+	u64 ii_iieph2_regval;
+	struct {
+		u64 i_rsvd_0:3;
+		u64 i_address:47;
+		u64 i_rsvd_1:10;
+		u64 i_tail:1;
+		u64 i_rsvd:3;
+	} ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+/******************************/
+
+/************************************************************************
+ *									*
+ *  This register's value is a bit vector that guards access from SXBs  *
+ * to local registers within the II as well as to external Crosstalk    *
+ * widgets.								*
+ *									*
+ ************************************************************************/
+
+typedef union ii_islapr_u {
+	u64 ii_islapr_regval;
+	struct {
+		u64 i_region:64;
+	} ii_islapr_fld_s;
+} ii_islapr_u_t;
+
+/************************************************************************
+ *									*
+ *  A write to this register of the 56-bit value "Pup+Bun" will cause	*
+ * the bit in the ISLAPR register corresponding to the region of the	*
+ * requestor to be set (access allowed).				*
+ *									*
+ ************************************************************************/
+
+typedef union ii_islapo_u {
+	u64 ii_islapo_regval;
+	struct {
+		u64 i_io_sbx_ovrride:56;
+		u64 i_rsvd:8;
+	} ii_islapo_fld_s;
+} ii_islapo_u_t;
+
+/************************************************************************
+ *									*
+ *  Determines how long the wrapper will wait after an interrupt is	*
+ * initially issued from the II before it times out the outstanding	*
+ * interrupt and drops it from the interrupt queue.			*
+ *									*
+ ************************************************************************/
+
+typedef union ii_iwi_u {
+	u64 ii_iwi_regval;
+	struct {
+		u64 i_prescale:24;
+		u64 i_rsvd:8;
+		u64 i_timeout:8;
+		u64 i_rsvd1:8;
+		u64 i_intrpt_retry_period:8;
+		u64 i_rsvd2:8;
+	} ii_iwi_fld_s;
+} ii_iwi_u_t;
+
+/************************************************************************
+ *									*
+ *  Log errors which have occurred in the II wrapper. The errors are	*
+ * cleared by writing to the IECLR register.				* 
+ *									*
+ ************************************************************************/
+
+typedef union ii_iwel_u {
+	u64 ii_iwel_regval;
+	struct {
+		u64 i_intr_timed_out:1;
+		u64 i_rsvd:7;
+		u64 i_cam_overflow:1;
+		u64 i_cam_read_miss:1;
+		u64 i_rsvd1:2;
+		u64 i_ioq_rep_underflow:1;
+		u64 i_ioq_req_underflow:1;
+		u64 i_ioq_rep_overflow:1;
+		u64 i_ioq_req_overflow:1;
+		u64 i_iiq_rep_overflow:1;
+		u64 i_iiq_req_overflow:1;
+		u64 i_rsvd2:6;
+		u64 i_ii_xn_rep_cred_over_under:1;
+		u64 i_ii_xn_req_cred_over_under:1;
+		u64 i_rsvd3:6;
+		u64 i_ii_xn_invalid_cmd:1;
+		u64 i_xn_ii_invalid_cmd:1;
+		u64 i_rsvd4:30;
+	} ii_iwel_fld_s;
+} ii_iwel_u_t;
+
+/************************************************************************
+ *									*
+ *  Controls the II wrapper.						* 
+ *									*
+ ************************************************************************/
+
+typedef union ii_iwc_u {
+	u64 ii_iwc_regval;
+	struct {
+		u64 i_dma_byte_swap:1;
+		u64 i_rsvd:3;
+		u64 i_cam_read_lines_reset:1;
+		u64 i_rsvd1:3;
+		u64 i_ii_xn_cred_over_under_log:1;
+		u64 i_rsvd2:19;
+		u64 i_xn_rep_iq_depth:5;
+		u64 i_rsvd3:3;
+		u64 i_xn_req_iq_depth:5;
+		u64 i_rsvd4:3;
+		u64 i_iiq_depth:6;
+		u64 i_rsvd5:12;
+		u64 i_force_rep_cred:1;
+		u64 i_force_req_cred:1;
+	} ii_iwc_fld_s;
+} ii_iwc_u_t;
+
+/************************************************************************
+ *									*
+ *  Status in the II wrapper.						* 
+ *									*
+ ************************************************************************/
+
+typedef union ii_iws_u {
+	u64 ii_iws_regval;
+	struct {
+		u64 i_xn_rep_iq_credits:5;
+		u64 i_rsvd:3;
+		u64 i_xn_req_iq_credits:5;
+		u64 i_rsvd1:51;
+	} ii_iws_fld_s;
+} ii_iws_u_t;
+
+/************************************************************************
+ *									*
+ *  Masks errors in the IWEL register.					*
+ *									*
+ ************************************************************************/
+
+typedef union ii_iweim_u {
+	u64 ii_iweim_regval;
+	struct {
+		u64 i_intr_timed_out:1;
+		u64 i_rsvd:7;
+		u64 i_cam_overflow:1;
+		u64 i_cam_read_miss:1;
+		u64 i_rsvd1:2;
+		u64 i_ioq_rep_underflow:1;
+		u64 i_ioq_req_underflow:1;
+		u64 i_ioq_rep_overflow:1;
+		u64 i_ioq_req_overflow:1;
+		u64 i_iiq_rep_overflow:1;
+		u64 i_iiq_req_overflow:1;
+		u64 i_rsvd2:6;
+		u64 i_ii_xn_rep_cred_overflow:1;
+		u64 i_ii_xn_req_cred_overflow:1;
+		u64 i_rsvd3:6;
+		u64 i_ii_xn_invalid_cmd:1;
+		u64 i_xn_ii_invalid_cmd:1;
+		u64 i_rsvd4:30;
+	} ii_iweim_fld_s;
+} ii_iweim_u_t;
+
+/************************************************************************
+ *									*
+ *  A write to this register causes a particular field in the           *
+ * corresponding widget's PRB entry to be adjusted up or down by 1.     *
+ * This counter should be used when recovering from error and reset     *
+ * conditions. Note that software would be capable of causing           *
+ * inadvertent overflow or underflow of these counters.                 *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ipca_u {
+	u64 ii_ipca_regval;
+	struct {
+		u64 i_wid:4;
+		u64 i_adjust:1;
+		u64 i_rsvd_1:3;
+		u64 i_field:2;
+		u64 i_rsvd:54;
+	} ii_ipca_fld_s;
+} ii_ipca_u_t;
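+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * nudging a PRB field up or down by one via IPCA, as described above.
+ * The meanings of the I_FIELD and I_ADJUST encodings are left to the
+ * caller here; nasid_t, REMOTE_HUB_S() and the IIO_IPCA offset are
+ * assumed to come from the surrounding SN2 support code.
+ */
+static inline void
+ii_ipca_adjust(nasid_t nasid, int wid, int field, int adjust)
+{
+	ii_ipca_u_t ipca;
+
+	ipca.ii_ipca_regval = 0;
+	ipca.ii_ipca_fld_s.i_wid = wid;		/* target widget's PRB */
+	ipca.ii_ipca_fld_s.i_field = field;	/* which PRB field */
+	ipca.ii_ipca_fld_s.i_adjust = adjust;	/* direction of the change */
+	REMOTE_HUB_S(nasid, IIO_IPCA, ipca.ii_ipca_regval);
+}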
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte0a_u {
+	u64 ii_iprte0a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte0a_fld_s;
+} ii_iprte0a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte1a_u {
+	u64 ii_iprte1a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte1a_fld_s;
+} ii_iprte1a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte2a_u {
+	u64 ii_iprte2a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte2a_fld_s;
+} ii_iprte2a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte3a_u {
+	u64 ii_iprte3a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte3a_fld_s;
+} ii_iprte3a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte4a_u {
+	u64 ii_iprte4a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte4a_fld_s;
+} ii_iprte4a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte5a_u {
+	u64 ii_iprte5a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte5a_fld_s;
+} ii_iprte5a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte6a_u {
+	u64 ii_iprte6a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte6a_fld_s;
+} ii_iprte6a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte7a_u {
+	u64 ii_iprte7a_regval;
+	struct {
+		u64 i_rsvd_1:54;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} ii_iprte7a_fld_s;
+} ii_iprte7a_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte0b_u {
+	u64 ii_iprte0b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte0b_fld_s;
+} ii_iprte0b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte1b_u {
+	u64 ii_iprte1b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte1b_fld_s;
+} ii_iprte1b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte2b_u {
+	u64 ii_iprte2b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte2b_fld_s;
+} ii_iprte2b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte3b_u {
+	u64 ii_iprte3b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte3b_fld_s;
+} ii_iprte3b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte4b_u {
+	u64 ii_iprte4b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte4b_fld_s;
+} ii_iprte4b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte5b_u {
+	u64 ii_iprte5b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte5b_fld_s;
+} ii_iprte5b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte6b_u {
+	u64 ii_iprte6b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte6b_fld_s;
+} ii_iprte6b_u_t;
+
+/************************************************************************
+ *									*
+ *  There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a   *
+ * PIO Read operation. The contents are used to form the correct        *
+ * Router Network packet and direct the Crosstalk reply to the          *
+ * appropriate processor.                                               *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iprte7b_u {
+	u64 ii_iprte7b_regval;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_address:47;
+		u64 i_init:3;
+		u64 i_source:11;
+	} ii_iprte7b_fld_s;
+} ii_iprte7b_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  SHub II contains a feature, which did not exist in     *
+ * the Hub, that automatically cleans up after a Read Response          *
+ * timeout, including deallocation of the IPRTE and recovery of IBuf    *
+ * space. The inclusion of this register in SHub is for backward        *
+ * compatibility.                                                       *
+ * A write to this register causes an entry from the table of           *
+ * outstanding PIO Read Requests to be freed and returned to the        *
+ * stack of free entries. This register is used in handling the         *
+ * timeout errors that result in a PIO Reply never returning from       *
+ * Crosstalk.                                                           *
+ * Note that this register does not affect the contents of the IPRTE    *
+ * registers. The Valid bits in those registers have to be              *
+ * specifically turned off by software.                                 *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ipdr_u {
+	u64 ii_ipdr_regval;
+	struct {
+		u64 i_te:3;
+		u64 i_rsvd_1:1;
+		u64 i_pnd:1;
+		u64 i_init_rpcnt:1;
+		u64 i_rsvd:58;
+	} ii_ipdr_fld_s;
+} ii_ipdr_u_t;
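+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * freeing a timed-out PIO Read table entry through IPDR, per the
+ * description above.  Note the IPRTE valid bit must still be cleared
+ * separately by software.  nasid_t, REMOTE_HUB_S() and the IIO_IPDR
+ * offset are assumed to come from the surrounding SN2 support code.
+ */
+static inline void
+ii_ipdr_free_prte(nasid_t nasid, int entry)
+{
+	ii_ipdr_u_t ipdr;
+
+	ipdr.ii_ipdr_regval = 0;
+	ipdr.ii_ipdr_fld_s.i_te = entry;	/* table entry to free */
+	REMOTE_HUB_S(nasid, IIO_IPDR, ipdr.ii_ipdr_regval);
+}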
+
+/************************************************************************
+ *									*
+ *  A write to this register causes a CRB entry to be returned to the   *
+ * queue of free CRBs. The entry should have previously been cleared    *
+ * (mark bit) via backdoor access to the pertinent CRB entry. This      *
+ * register is used in the last step of handling the errors that are    *
+ * captured and marked in CRB entries.  Briefly: 1) first error for     *
+ * DMA write from a particular device, and first error for a            *
+ * particular BTE stream, lead to a marked CRB entry, and processor     *
+ * interrupt, 2) software reads the error information captured in the   *
+ * CRB entry, and presumably takes some corrective action, 3)           *
+ * software clears the mark bit, and finally 4) software writes to      *
+ * the ICDR register to return the CRB entry to the list of free CRB    *
+ * entries.                                                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icdr_u {
+	u64 ii_icdr_regval;
+	struct {
+		u64 i_crb_num:4;
+		u64 i_pnd:1;
+		u64 i_rsvd:59;
+	} ii_icdr_fld_s;
+} ii_icdr_u_t;
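+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * step 4 of the CRB error-handling sequence described above, namely
+ * returning a cleared CRB entry to the free list via ICDR.  The poll
+ * assumes the PND bit stays set while the deallocation is in flight.
+ * nasid_t, REMOTE_HUB_L()/REMOTE_HUB_S() and the IIO_ICDR offset are
+ * assumed to come from the surrounding SN2 support code.
+ */
+static inline void
+ii_icdr_free_crb(nasid_t nasid, int crb)
+{
+	ii_icdr_u_t icdr;
+
+	icdr.ii_icdr_regval = 0;
+	icdr.ii_icdr_fld_s.i_crb_num = crb;	/* CRB entry to release */
+	REMOTE_HUB_S(nasid, IIO_ICDR, icdr.ii_icdr_regval);
+
+	/* Wait for the deallocation to complete. */
+	do {
+		icdr.ii_icdr_regval = REMOTE_HUB_L(nasid, IIO_ICDR);
+	} while (icdr.ii_icdr_fld_s.i_pnd);
+}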
+
+/************************************************************************
+ *									*
+ *  This register provides debug access to two FIFOs inside of II.      *
+ * Both IOQ_MAX* fields of this register contain the instantaneous      *
+ * depth (in units of the number of available entries) of the           *
+ * associated IOQ FIFO.  A read of this register will return the        *
+ * number of free entries on each FIFO at the time of the read.  So     *
+ * when a FIFO is idle, the associated field contains the maximum       *
+ * depth of the FIFO.  This register is writable for debug reasons      *
+ * and is intended to be written with the maximum desired FIFO depth    *
+ * while the FIFO is idle. Software must ensure that II is idle when    *
+ * this register is written. If there are any active entries in any     *
+ * of these FIFOs when this register is written, the results are        *
+ * undefined.                                                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ifdr_u {
+	u64 ii_ifdr_regval;
+	struct {
+		u64 i_ioq_max_rq:7;
+		u64 i_set_ioq_rq:1;
+		u64 i_ioq_max_rp:7;
+		u64 i_set_ioq_rp:1;
+		u64 i_rsvd:48;
+	} ii_ifdr_fld_s;
+} ii_ifdr_u_t;
+
+/************************************************************************
+ *									*
+ *  This register allows the II to become sluggish in removing          *
+ * messages from its inbound queue (IIQ). This will cause messages to   *
+ * back up in either virtual channel. Disabling the "molasses" mode     *
+ * subsequently allows the II to be tested under stress. In the         *
+ * sluggish ("Molasses") mode, the localized effects of congestion      *
+ * can be observed.                                                     *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iiap_u {
+	u64 ii_iiap_regval;
+	struct {
+		u64 i_rq_mls:6;
+		u64 i_rsvd_1:2;
+		u64 i_rp_mls:6;
+		u64 i_rsvd:50;
+	} ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+/************************************************************************
+ *									*
+ *  This register allows several parameters of CRB operation to be      *
+ * set. Note that writing to this register can have catastrophic side   *
+ * effects, if the CRB is not quiescent, i.e. if the CRB is             *
+ * processing protocol messages when the write occurs.                  *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icmr_u {
+	u64 ii_icmr_regval;
+	struct {
+		u64 i_sp_msg:1;
+		u64 i_rd_hdr:1;
+		u64 i_rsvd_4:2;
+		u64 i_c_cnt:4;
+		u64 i_rsvd_3:4;
+		u64 i_clr_rqpd:1;
+		u64 i_clr_rppd:1;
+		u64 i_rsvd_2:2;
+		u64 i_fc_cnt:4;
+		u64 i_crb_vld:15;
+		u64 i_crb_mark:15;
+		u64 i_rsvd_1:2;
+		u64 i_precise:1;
+		u64 i_rsvd:11;
+	} ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+/************************************************************************
+ *									*
+ *  This register allows control of the table portion of the CRB        *
+ * logic via software. Control operations from this register have       *
+ * priority over all incoming Crosstalk or BTE requests.                *
+ *									*
+ ************************************************************************/
+
+typedef union ii_iccr_u {
+	u64 ii_iccr_regval;
+	struct {
+		u64 i_crb_num:4;
+		u64 i_rsvd_1:4;
+		u64 i_cmd:8;
+		u64 i_pending:1;
+		u64 i_rsvd:47;
+	} ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+/************************************************************************
+ *									*
+ *  This register allows the maximum timeout value to be programmed.    *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icto_u {
+	u64 ii_icto_regval;
+	struct {
+		u64 i_timeout:8;
+		u64 i_rsvd:56;
+	} ii_icto_fld_s;
+} ii_icto_u_t;
+
+/************************************************************************
+ *									*
+ *  This register allows the timeout prescalar to be programmed. An     *
+ * internal counter is associated with this register. When the          *
+ * internal counter reaches the value of the PRESCALE field, the        *
+ * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT]   *
+ * field). The internal counter resets to zero, and then continues      *
+ * counting.                                                            *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ictp_u {
+	u64 ii_ictp_regval;
+	struct {
+		u64 i_prescale:24;
+		u64 i_rsvd:40;
+	} ii_ictp_fld_s;
+} ii_ictp_u_t;
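+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the effective CRB timeout implied by ICTO and ICTP together.  Each
+ * PRESCALE interval of II clocks bumps every valid CRB's timer, and a
+ * CRB times out when its timer reaches ICTO[TIMEOUT], so the period
+ * is roughly PRESCALE * TIMEOUT II clocks.  The helper name is
+ * hypothetical.
+ */
+static inline u64
+ii_crb_timeout_clocks(ii_ictp_u_t ictp, ii_icto_u_t icto)
+{
+	return (u64) ictp.ii_ictp_fld_s.i_prescale *
+		icto.ii_icto_fld_s.i_timeout;
+}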
+
+/************************************************************************
+ *									*
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five   *
+ * registers (_A to _E) are required to read and write each entry.      *
+ * The CRB Entry registers can be conceptualized as rows and columns.   *
+ * Each row contains the five registers required for a single CRB       *
+ * Entry. The first doubleword (column) for each entry is labeled A,    *
+ * the second doubleword (higher address) is labeled B, the third       *
+ * doubleword is labeled C, the fourth doubleword is labeled D and      *
+ * the fifth doubleword is labeled E. All CRB entries have their        *
+ * addresses on a quarter cacheline aligned boundary.                   *
+ * Upon reset, only the following fields are initialized: valid         *
+ * (VLD), priority count, timeout, timeout valid, and context valid.    *
+ * All other bits should be cleared by software before use (after       *
+ * recovering any potential error state from before the reset).         *
+ * The following five register layouts summarize the format for the     *
+ * five registers that are used for each ICRB# Entry.                   *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icrb0_a_u {
+	u64 ii_icrb0_a_regval;
+	struct {
+		u64 ia_iow:1;
+		u64 ia_vld:1;
+		u64 ia_addr:47;
+		u64 ia_tnum:5;
+		u64 ia_sidn:4;
+		u64 ia_rsvd:6;
+	} ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five   *
+ * registers (_A to _E) are required to read and write each entry.      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icrb0_b_u {
+	u64 ii_icrb0_b_regval;
+	struct {
+		u64 ib_xt_err:1;
+		u64 ib_mark:1;
+		u64 ib_ln_uce:1;
+		u64 ib_errcode:3;
+		u64 ib_error:1;
+		u64 ib_stall__bte_1:1;
+		u64 ib_stall__bte_0:1;
+		u64 ib_stall__intr:1;
+		u64 ib_stall_ib:1;
+		u64 ib_intvn:1;
+		u64 ib_wb:1;
+		u64 ib_hold:1;
+		u64 ib_ack:1;
+		u64 ib_resp:1;
+		u64 ib_ack_cnt:11;
+		u64 ib_rsvd:7;
+		u64 ib_exc:5;
+		u64 ib_init:3;
+		u64 ib_imsg:8;
+		u64 ib_imsgtype:2;
+		u64 ib_use_old:1;
+		u64 ib_rsvd_1:11;
+	} ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five   *
+ * registers (_A to _E) are required to read and write each entry.      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icrb0_c_u {
+	u64 ii_icrb0_c_regval;
+	struct {
+		u64 ic_source:15;
+		u64 ic_size:2;
+		u64 ic_ct:1;
+		u64 ic_bte_num:1;
+		u64 ic_gbr:1;
+		u64 ic_resprqd:1;
+		u64 ic_bo:1;
+		u64 ic_suppl:15;
+		u64 ic_rsvd:27;
+	} ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five   *
+ * registers (_A to _E) are required to read and write each entry.      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icrb0_d_u {
+	u64 ii_icrb0_d_regval;
+	struct {
+		u64 id_pa_be:43;
+		u64 id_bte_op:1;
+		u64 id_pr_psc:4;
+		u64 id_pr_cnt:4;
+		u64 id_sleep:1;
+		u64 id_rsvd:11;
+	} ii_icrb0_d_fld_s;
+} ii_icrb0_d_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial            *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five   *
+ * registers (_A to _E) are required to read and write each entry.      *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icrb0_e_u {
+	u64 ii_icrb0_e_regval;
+	struct {
+		u64 ie_timeout:8;
+		u64 ie_context:15;
+		u64 ie_rsvd:1;
+		u64 ie_tvld:1;
+		u64 ie_cvld:1;
+		u64 ie_rsvd_0:38;
+	} ii_icrb0_e_fld_s;
+} ii_icrb0_e_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the lower 64 bits of the header of the       *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR   *
+ * register is set.                                                     *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icsml_u {
+	u64 ii_icsml_regval;
+	struct {
+		u64 i_tt_addr:47;
+		u64 i_newsuppl_ex:14;
+		u64 i_reserved:2;
+		u64 i_overflow:1;
+	} ii_icsml_fld_s;
+} ii_icsml_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the middle 64 bits of the header of the      *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR   *
+ * register is set.                                                     *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icsmm_u {
+	u64 ii_icsmm_regval;
+	struct {
+		u64 i_tt_ack_cnt:11;
+		u64 i_reserved:53;
+	} ii_icsmm_fld_s;
+} ii_icsmm_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the microscopic state, all the inputs to     *
+ * the protocol table, captured with the spurious message. Valid when   *
+ * the SP_MSG bit in the ICMR register is set.                          *
+ *									*
+ ************************************************************************/
+
+typedef union ii_icsmh_u {
+	u64 ii_icsmh_regval;
+	struct {
+		u64 i_tt_vld:1;
+		u64 i_xerr:1;
+		u64 i_ft_cwact_o:1;
+		u64 i_ft_wact_o:1;
+		u64 i_ft_active_o:1;
+		u64 i_sync:1;
+		u64 i_mnusg:1;
+		u64 i_mnusz:1;
+		u64 i_plusz:1;
+		u64 i_plusg:1;
+		u64 i_tt_exc:5;
+		u64 i_tt_wb:1;
+		u64 i_tt_hold:1;
+		u64 i_tt_ack:1;
+		u64 i_tt_resp:1;
+		u64 i_tt_intvn:1;
+		u64 i_g_stall_bte1:1;
+		u64 i_g_stall_bte0:1;
+		u64 i_g_stall_il:1;
+		u64 i_g_stall_ib:1;
+		u64 i_tt_imsg:8;
+		u64 i_tt_imsgtype:2;
+		u64 i_tt_use_old:1;
+		u64 i_tt_respreqd:1;
+		u64 i_tt_bte_num:1;
+		u64 i_cbn:1;
+		u64 i_match:1;
+		u64 i_rpcnt_lt_34:1;
+		u64 i_rpcnt_ge_34:1;
+		u64 i_rpcnt_lt_18:1;
+		u64 i_rpcnt_ge_18:1;
+		u64 i_rpcnt_lt_2:1;
+		u64 i_rpcnt_ge_2:1;
+		u64 i_rqcnt_lt_18:1;
+		u64 i_rqcnt_ge_18:1;
+		u64 i_rqcnt_lt_2:1;
+		u64 i_rqcnt_ge_2:1;
+		u64 i_tt_device:7;
+		u64 i_tt_init:3;
+		u64 i_reserved:5;
+	} ii_icsmh_fld_s;
+} ii_icsmh_u_t;
+
+/************************************************************************
+ *									*
+ *  The Shub DEBUG unit provides a 3-bit selection signal to the        *
+ * II core and a 3-bit selection signal to the fsbclk domain in the II  *
+ * wrapper.                                                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_idbss_u {
+	u64 ii_idbss_regval;
+	struct {
+		u64 i_iioclk_core_submenu:3;
+		u64 i_rsvd:5;
+		u64 i_fsbclk_wrapper_submenu:3;
+		u64 i_rsvd_1:5;
+		u64 i_iioclk_menu:5;
+		u64 i_rsvd_2:43;
+	} ii_idbss_fld_s;
+} ii_idbss_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register is used to set up the length for a       *
+ * transfer and then to monitor the progress of that transfer. This     *
+ * register needs to be initialized before a transfer is started. A     *
+ * legitimate write to this register will set the Busy bit, clear the   *
+ * Error bit, and initialize the length to the value desired.           *
+ * While the transfer is in progress, hardware will decrement the       *
+ * length field with each successful block that is copied. Once the     *
+ * transfer completes, hardware will clear the Busy bit. The length     *
+ * field will also contain the number of cache lines left to be         *
+ * transferred.                                                         *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibls0_u {
+	u64 ii_ibls0_regval;
+	struct {
+		u64 i_length:16;
+		u64 i_error:1;
+		u64 i_rsvd_1:3;
+		u64 i_busy:1;
+		u64 i_rsvd:43;
+	} ii_ibls0_fld_s;
+} ii_ibls0_u_t;
+
+/************************************************************************
+ *									*
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibsa0_u {
+	u64 ii_ibsa0_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:42;
+		u64 i_rsvd:15;
+	} ii_ibsa0_fld_s;
+} ii_ibsa0_u_t;
+
+/************************************************************************
+ *									*
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibda0_u {
+	u64 ii_ibda0_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:42;
+		u64 i_rsvd:15;
+	} ii_ibda0_fld_s;
+} ii_ibda0_u_t;
+
+/************************************************************************
+ *									*
+ *  Writing to this register sets up the attributes of the transfer     *
+ * and initiates the transfer operation. Reading this register has      *
+ * the side effect of terminating any transfer in progress. Note:       *
+ * stopping a transfer midstream could have an adverse impact on the    *
+ * other BTE. If a BTE stream has to be stopped (due to error           *
+ * handling for example), both BTE streams should be stopped and        *
+ * their transfers discarded.                                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibct0_u {
+	u64 ii_ibct0_regval;
+	struct {
+		u64 i_zerofill:1;
+		u64 i_rsvd_2:3;
+		u64 i_notify:1;
+		u64 i_rsvd_1:3;
+		u64 i_poison:1;
+		u64 i_rsvd:55;
+	} ii_ibct0_fld_s;
+} ii_ibct0_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the address to which the WINV is sent.       *
+ * This address has to be cache line aligned.                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibna0_u {
+	u64 ii_ibna0_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:42;
+		u64 i_rsvd:15;
+	} ii_ibna0_fld_s;
+} ii_ibna0_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the programmable level as well as the node   *
+ * ID and PI unit of the processor to which the interrupt will be       *
+ * sent.								*
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibia0_u {
+	u64 ii_ibia0_regval;
+	struct {
+		u64 i_rsvd_2:1;
+		u64 i_node_id:11;
+		u64 i_rsvd_1:4;
+		u64 i_level:7;
+		u64 i_rsvd:41;
+	} ii_ibia0_fld_s;
+} ii_ibia0_u_t;
+
+/************************************************************************
+ *									*
+ * Description:  This register is used to set up the length for a       *
+ * transfer and then to monitor the progress of that transfer. This     *
+ * register needs to be initialized before a transfer is started. A     *
+ * legitimate write to this register will set the Busy bit, clear the   *
+ * Error bit, and initialize the length to the value desired.           *
+ * While the transfer is in progress, hardware will decrement the       *
+ * length field with each successful block that is copied. Once the     *
+ * transfer completes, hardware will clear the Busy bit. The length     *
+ * field will also contain the number of cache lines left to be         *
+ * transferred.                                                         *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibls1_u {
+	u64 ii_ibls1_regval;
+	struct {
+		u64 i_length:16;
+		u64 i_error:1;
+		u64 i_rsvd_1:3;
+		u64 i_busy:1;
+		u64 i_rsvd:43;
+	} ii_ibls1_fld_s;
+} ii_ibls1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibsa1_u {
+	u64 ii_ibsa1_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:33;
+		u64 i_rsvd:24;
+	} ii_ibsa1_fld_s;
+} ii_ibsa1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register should be loaded before a transfer is started. The    *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical       *
+ * address as described in Section 1.3, Figure2 and Figure3. Since      *
+ * the bottom 7 bits of the address are always taken to be zero, BTE    *
+ * transfers are always cacheline-aligned.                              *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibda1_u {
+	u64 ii_ibda1_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:33;
+		u64 i_rsvd:24;
+	} ii_ibda1_fld_s;
+} ii_ibda1_u_t;
+
+/************************************************************************
+ *									*
+ *  Writing to this register sets up the attributes of the transfer     *
+ * and initiates the transfer operation. Reading this register has      *
+ * the side effect of terminating any transfer in progress. Note:       *
+ * stopping a transfer midstream could have an adverse impact on the    *
+ * other BTE. If a BTE stream has to be stopped (due to error           *
+ * handling for example), both BTE streams should be stopped and        *
+ * their transfers discarded.                                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibct1_u {
+	u64 ii_ibct1_regval;
+	struct {
+		u64 i_zerofill:1;
+		u64 i_rsvd_2:3;
+		u64 i_notify:1;
+		u64 i_rsvd_1:3;
+		u64 i_poison:1;
+		u64 i_rsvd:55;
+	} ii_ibct1_fld_s;
+} ii_ibct1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the address to which the WINV is sent.       *
+ * This address has to be cache line aligned.                           *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibna1_u {
+	u64 ii_ibna1_regval;
+	struct {
+		u64 i_rsvd_1:7;
+		u64 i_addr:33;
+		u64 i_rsvd:24;
+	} ii_ibna1_fld_s;
+} ii_ibna1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register contains the programmable level as well as the node   *
+ * ID and PI unit of the processor to which the interrupt will be       *
+ * sent.								*
+ *									*
+ ************************************************************************/
+
+typedef union ii_ibia1_u {
+	u64 ii_ibia1_regval;
+	struct {
+		u64 i_pi_id:1;
+		u64 i_node_id:8;
+		u64 i_rsvd_1:7;
+		u64 i_level:7;
+		u64 i_rsvd:41;
+	} ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+/************************************************************************
+ *									*
+ *  This register defines the resources that feed information into      *
+ * the two performance counters located in the IO Performance           *
+ * Profiling Register. There are 17 different quantities that can be    *
+ * measured. Given these 17 different options, the two performance      *
+ * counters have 15 of them in common; menu selections 0 through 0xE    *
+ * are identical for each performance counter. As for the other two     *
+ * options, one is available from one performance counter and the       *
+ * other is available from the other performance counter. Hence, the    *
+ * II supports all 17*16=272 possible combinations of quantities to     *
+ * measure.                                                             *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ipcr_u {
+	u64 ii_ipcr_regval;
+	struct {
+		u64 i_ippr0_c:4;
+		u64 i_ippr1_c:4;
+		u64 i_icct:8;
+		u64 i_rsvd:48;
+	} ii_ipcr_fld_s;
+} ii_ipcr_u_t;
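+
+/*
+ * Illustrative sketch (not part of the original interface): composing
+ * an IPCR value. The menu selections 0x3 and 0x7 below are arbitrary
+ * examples from the 0x0-0xE common range described above.
+ */
+static inline u64 ii_ipcr_sketch(void)
+{
+	ii_ipcr_u_t ipcr;
+
+	ipcr.ii_ipcr_regval = 0;
+	ipcr.ii_ipcr_fld_s.i_ippr0_c = 0x3;	/* menu for counter 0 */
+	ipcr.ii_ipcr_fld_s.i_ippr1_c = 0x7;	/* menu for counter 1 */
+	return ipcr.ii_ipcr_regval;
+}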
+
+/************************************************************************
+ *									*
+ *  IO Performance Profiling Register: holds the two performance        *
+ * counters whose inputs are selected by the IPCR register above.       *
+ *									*
+ ************************************************************************/
+
+typedef union ii_ippr_u {
+	u64 ii_ippr_regval;
+	struct {
+		u64 i_ippr0:32;
+		u64 i_ippr1:32;
+	} ii_ippr_fld_s;
+} ii_ippr_u_t;
+
+/************************************************************************
+ *									*
+ * The following defines, which were not formed into structures, are	*
+ * probably identical to another register; the name of that register	*
+ * is given against each of them. This information needs to be		*
+ * checked carefully.							*
+ *									*
+ *		IIO_ICRB1_A		IIO_ICRB0_A			*
+ *		IIO_ICRB1_B		IIO_ICRB0_B			*
+ *		IIO_ICRB1_C		IIO_ICRB0_C			*
+ *		IIO_ICRB1_D		IIO_ICRB0_D			*
+ *		IIO_ICRB1_E		IIO_ICRB0_E			*
+ *		IIO_ICRB2_A		IIO_ICRB0_A			*
+ *		IIO_ICRB2_B		IIO_ICRB0_B			*
+ *		IIO_ICRB2_C		IIO_ICRB0_C			*
+ *		IIO_ICRB2_D		IIO_ICRB0_D			*
+ *		IIO_ICRB2_E		IIO_ICRB0_E			*
+ *		IIO_ICRB3_A		IIO_ICRB0_A			*
+ *		IIO_ICRB3_B		IIO_ICRB0_B			*
+ *		IIO_ICRB3_C		IIO_ICRB0_C			*
+ *		IIO_ICRB3_D		IIO_ICRB0_D			*
+ *		IIO_ICRB3_E		IIO_ICRB0_E			*
+ *		IIO_ICRB4_A		IIO_ICRB0_A			*
+ *		IIO_ICRB4_B		IIO_ICRB0_B			*
+ *		IIO_ICRB4_C		IIO_ICRB0_C			*
+ *		IIO_ICRB4_D		IIO_ICRB0_D			*
+ *		IIO_ICRB4_E		IIO_ICRB0_E			*
+ *		IIO_ICRB5_A		IIO_ICRB0_A			*
+ *		IIO_ICRB5_B		IIO_ICRB0_B			*
+ *		IIO_ICRB5_C		IIO_ICRB0_C			*
+ *		IIO_ICRB5_D		IIO_ICRB0_D			*
+ *		IIO_ICRB5_E		IIO_ICRB0_E			*
+ *		IIO_ICRB6_A		IIO_ICRB0_A			*
+ *		IIO_ICRB6_B		IIO_ICRB0_B			*
+ *		IIO_ICRB6_C		IIO_ICRB0_C			*
+ *		IIO_ICRB6_D		IIO_ICRB0_D			*
+ *		IIO_ICRB6_E		IIO_ICRB0_E			*
+ *		IIO_ICRB7_A		IIO_ICRB0_A			*
+ *		IIO_ICRB7_B		IIO_ICRB0_B			*
+ *		IIO_ICRB7_C		IIO_ICRB0_C			*
+ *		IIO_ICRB7_D		IIO_ICRB0_D			*
+ *		IIO_ICRB7_E		IIO_ICRB0_E			*
+ *		IIO_ICRB8_A		IIO_ICRB0_A			*
+ *		IIO_ICRB8_B		IIO_ICRB0_B			*
+ *		IIO_ICRB8_C		IIO_ICRB0_C			*
+ *		IIO_ICRB8_D		IIO_ICRB0_D			*
+ *		IIO_ICRB8_E		IIO_ICRB0_E			*
+ *		IIO_ICRB9_A		IIO_ICRB0_A			*
+ *		IIO_ICRB9_B		IIO_ICRB0_B			*
+ *		IIO_ICRB9_C		IIO_ICRB0_C			*
+ *		IIO_ICRB9_D		IIO_ICRB0_D			*
+ *		IIO_ICRB9_E		IIO_ICRB0_E			*
+ *		IIO_ICRBA_A		IIO_ICRB0_A			*
+ *		IIO_ICRBA_B		IIO_ICRB0_B			*
+ *		IIO_ICRBA_C		IIO_ICRB0_C			*
+ *		IIO_ICRBA_D		IIO_ICRB0_D			*
+ *		IIO_ICRBA_E		IIO_ICRB0_E			*
+ *		IIO_ICRBB_A		IIO_ICRB0_A			*
+ *		IIO_ICRBB_B		IIO_ICRB0_B			*
+ *		IIO_ICRBB_C		IIO_ICRB0_C			*
+ *		IIO_ICRBB_D		IIO_ICRB0_D			*
+ *		IIO_ICRBB_E		IIO_ICRB0_E			*
+ *		IIO_ICRBC_A		IIO_ICRB0_A			*
+ *		IIO_ICRBC_B		IIO_ICRB0_B			*
+ *		IIO_ICRBC_C		IIO_ICRB0_C			*
+ *		IIO_ICRBC_D		IIO_ICRB0_D			*
+ *		IIO_ICRBC_E		IIO_ICRB0_E			*
+ *		IIO_ICRBD_A		IIO_ICRB0_A			*
+ *		IIO_ICRBD_B		IIO_ICRB0_B			*
+ *		IIO_ICRBD_C		IIO_ICRB0_C			*
+ *		IIO_ICRBD_D		IIO_ICRB0_D			*
+ *		IIO_ICRBD_E		IIO_ICRB0_E			*
+ *		IIO_ICRBE_A		IIO_ICRB0_A			*
+ *		IIO_ICRBE_B		IIO_ICRB0_B			*
+ *		IIO_ICRBE_C		IIO_ICRB0_C			*
+ *		IIO_ICRBE_D		IIO_ICRB0_D			*
+ *		IIO_ICRBE_E		IIO_ICRB0_E			*
+ *									*
+ ************************************************************************/
+
+/*
+ * Slightly friendlier names for some common registers.
+ */
+#define IIO_WIDGET              IIO_WID		/* Widget identification */
+#define IIO_WIDGET_STAT         IIO_WSTAT	/* Widget status register */
+#define IIO_WIDGET_CTRL         IIO_WCR		/* Widget control register */
+#define IIO_PROTECT             IIO_ILAPR	/* IO interface protection */
+#define IIO_PROTECT_OVRRD       IIO_ILAPO	/* IO protect override */
+#define IIO_OUTWIDGET_ACCESS    IIO_IOWA	/* Outbound widget access */
+#define IIO_INWIDGET_ACCESS     IIO_IIWA	/* Inbound widget access */
+#define IIO_INDEV_ERR_MASK      IIO_IIDEM	/* Inbound device error mask */
+#define IIO_LLP_CSR             IIO_ILCSR	/* LLP control and status */
+#define IIO_LLP_LOG             IIO_ILLR	/* LLP log */
+#define IIO_XTALKCC_TOUT        IIO_IXCC	/* Xtalk credit count timeout */
+#define IIO_XTALKTT_TOUT        IIO_IXTT	/* Xtalk tail timeout */
+#define IIO_IO_ERR_CLR          IIO_IECLR	/* IO error clear */
+#define IIO_IGFX_0 		IIO_IGFX0
+#define IIO_IGFX_1 		IIO_IGFX1
+#define IIO_IBCT_0		IIO_IBCT0
+#define IIO_IBCT_1		IIO_IBCT1
+#define IIO_IBLS_0		IIO_IBLS0
+#define IIO_IBLS_1		IIO_IBLS1
+#define IIO_IBSA_0		IIO_IBSA0
+#define IIO_IBSA_1		IIO_IBSA1
+#define IIO_IBDA_0		IIO_IBDA0
+#define IIO_IBDA_1		IIO_IBDA1
+#define IIO_IBNA_0		IIO_IBNA0
+#define IIO_IBNA_1		IIO_IBNA1
+#define IIO_IBIA_0		IIO_IBIA0
+#define IIO_IBIA_1		IIO_IBIA1
+#define IIO_IOPRB_0		IIO_IPRB0
+
+#define IIO_PRTE_A(_x)		(IIO_IPRTE0_A + (8 * (_x)))
+#define IIO_PRTE_B(_x)		(IIO_IPRTE0_B + (8 * (_x)))
+#define IIO_NUM_PRTES		8	/* Total number of PRTEs (PIO read table entries) */
+#define IIO_WIDPRTE_A(x)	IIO_PRTE_A(((x) - 8))	/* widget ID to its PRTE num */
+#define IIO_WIDPRTE_B(x)	IIO_PRTE_B(((x) - 8))	/* widget ID to its PRTE num */
+
+#define IIO_NUM_IPRBS 		9
+
+#define IIO_LLP_CSR_IS_UP		0x00002000
+#define IIO_LLP_CSR_LLP_STAT_MASK       0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT       12
+
+#define IIO_LLP_CB_MAX  0xffff	/* in ILLR CB_CNT, Max Check Bit errors */
+#define IIO_LLP_SN_MAX  0xffff	/* in ILLR SN_CNT, Max Sequence Number errors */
+
+/* key to IIO_PROTECT_OVRRD */
+#define IIO_PROTECT_OVRRD_KEY   0x53474972756c6573ull	/* "SGIrules" */
+
+/* BTE register names */
+#define IIO_BTE_STAT_0          IIO_IBLS_0	/* Also BTE length/status 0 */
+#define IIO_BTE_SRC_0           IIO_IBSA_0	/* Also BTE source address  0 */
+#define IIO_BTE_DEST_0          IIO_IBDA_0	/* Also BTE dest. address 0 */
+#define IIO_BTE_CTRL_0          IIO_IBCT_0	/* Also BTE control/terminate 0 */
+#define IIO_BTE_NOTIFY_0        IIO_IBNA_0	/* Also BTE notification 0 */
+#define IIO_BTE_INT_0           IIO_IBIA_0	/* Also BTE interrupt 0 */
+#define IIO_BTE_OFF_0           0	/* Base offset from BTE 0 regs. */
+#define IIO_BTE_OFF_1   	(IIO_IBLS_1 - IIO_IBLS_0)	/* Offset from base to BTE 1 */
+
+/* BTE register offsets from base */
+#define BTEOFF_STAT             0
+#define BTEOFF_SRC      	(IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
+#define BTEOFF_DEST     	(IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
+#define BTEOFF_CTRL     	(IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
+#define BTEOFF_NOTIFY   	(IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
+#define BTEOFF_INT      	(IIO_BTE_INT_0 - IIO_BTE_STAT_0)
+
+/* names used in shub diags */
+#define IIO_BASE_BTE0   IIO_IBLS_0
+#define IIO_BASE_BTE1   IIO_IBLS_1
+
+/*
+ * Macro which takes the widget number, and returns the
+ * IO PRB address of that widget.
+ * value _x is expected to be a widget number in the range
+ * 0, 8 - 0xF
+ */
+#define IIO_IOPRB(_x)	(IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+                	(_x) : \
+                	(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
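+
+/*
+ * Worked example (illustrative): widget 0 maps straight to PRB slot 0,
+ * while widgets 8..0xf map to slots 1..8 (hence IIO_NUM_IPRBS == 9).
+ * E.g. IIO_IOPRB(0xc) = IIO_IOPRB_0 + ((0xc - 0x7) << 3)
+ *                     = IIO_IOPRB_0 + 0x28.
+ */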
+
+/* GFX Flow Control Node/Widget Register */
+#define IIO_IGFX_W_NUM_BITS	4	/* size of widget num field */
+#define IIO_IGFX_W_NUM_MASK	((1<<IIO_IGFX_W_NUM_BITS)-1)
+#define IIO_IGFX_W_NUM_SHIFT	0
+#define IIO_IGFX_PI_NUM_BITS	1	/* size of PI num field */
+#define IIO_IGFX_PI_NUM_MASK	((1<<IIO_IGFX_PI_NUM_BITS)-1)
+#define IIO_IGFX_PI_NUM_SHIFT	4
+#define IIO_IGFX_N_NUM_BITS	8	/* size of node num field */
+#define IIO_IGFX_N_NUM_MASK	((1<<IIO_IGFX_N_NUM_BITS)-1)
+#define IIO_IGFX_N_NUM_SHIFT	5
+#define IIO_IGFX_P_NUM_BITS	1	/* size of processor num field */
+#define IIO_IGFX_P_NUM_MASK	((1<<IIO_IGFX_P_NUM_BITS)-1)
+#define IIO_IGFX_P_NUM_SHIFT	16
+#define IIO_IGFX_INIT(widget, pi, node, cpu)				(\
+	(((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) |	 \
+	(((pi)     & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)|	 \
+	(((node)   & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) |	 \
+	(((cpu)    & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
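+
+/*
+ * Worked example (illustrative, arbitrary values): widget 0x9, PI 1,
+ * node 0x2, cpu 0 packs as
+ * IIO_IGFX_INIT(0x9, 1, 0x2, 0) = 0x9 | (1 << 4) | (0x2 << 5) = 0x59.
+ */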
+
+/* Scratch registers (all bits available) */
+#define IIO_SCRATCH_REG0        IIO_ISCR0
+#define IIO_SCRATCH_REG1        IIO_ISCR1
+#define IIO_SCRATCH_MASK        0xffffffffffffffffUL
+
+#define IIO_SCRATCH_BIT0_0      0x0000000000000001UL
+#define IIO_SCRATCH_BIT0_1      0x0000000000000002UL
+#define IIO_SCRATCH_BIT0_2      0x0000000000000004UL
+#define IIO_SCRATCH_BIT0_3      0x0000000000000008UL
+#define IIO_SCRATCH_BIT0_4      0x0000000000000010UL
+#define IIO_SCRATCH_BIT0_5      0x0000000000000020UL
+#define IIO_SCRATCH_BIT0_6      0x0000000000000040UL
+#define IIO_SCRATCH_BIT0_7      0x0000000000000080UL
+#define IIO_SCRATCH_BIT0_8      0x0000000000000100UL
+#define IIO_SCRATCH_BIT0_9      0x0000000000000200UL
+#define IIO_SCRATCH_BIT0_A      0x0000000000000400UL
+
+#define IIO_SCRATCH_BIT1_0      0x0000000000000001UL
+#define IIO_SCRATCH_BIT1_1      0x0000000000000002UL
+/* IO Translation Table Entries */
+#define IIO_NUM_ITTES   7	/* ITTEs numbered 0..6 */
+					/* Hw manuals number them 1..7! */
+/*
+ * IIO_IMEM Register fields.
+ */
+#define IIO_IMEM_W0ESD  0x1UL	/* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD	(1UL << 4)	/* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD	(1UL << 8)	/* BTE 1 shut down due to error */
+
+/*
+ * As a permanent workaround for a bug in the PI side of the shub, we've
+ * redefined big window 7 as small window 0.
+ XXX does this still apply for SN1??
+ */
+#define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN            HUB_NUM_BIG_WINDOW
+
+#define ILCSR_WARM_RESET        0x100
+
+/*
+ * CRB manipulation macros
+ *	The CRB macros are slightly complicated, since there are up to
+ *	five registers associated with each CRB entry.
+ */
+#define IIO_NUM_CRBS            15	/* Number of CRBs */
+#define IIO_NUM_PC_CRBS         4	/* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET         8
+#define IIO_ICRB_0              IIO_ICRB0_A
+#define IIO_ICRB_ADDR_SHFT	2	/* Shift to get proper address */
+/* XXX - This is now tuneable:
+        #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x)	((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
+#define IIO_ICRB_B(_x)	((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
+#define IIO_ICRB_C(_x)	((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
+#define IIO_ICRB_D(_x)	((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
+#define IIO_ICRB_E(_x)	((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
+
+#define TNUM_TO_WIDGET_DEV(_tnum)	((_tnum) & 0x7)
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR     0	/* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR     1	/* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR     2	/* Write error by IIO access
+					 * e.g. WINV to a Read only line. */
+#define IIO_ICRB_ECODE_AERR     3	/* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR    4	/* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR    5	/* Error on partial read  */
+#define IIO_ICRB_ECODE_TOUT     6	/* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR    7	/* Incoming xtalk pkt had error bit */
+
+/*
+ * Values for field imsgtype
+ */
+#define IIO_ICRB_IMSGT_XTALK    0	/* Incoming message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE      1	/* Incoming message from BTE    */
+#define IIO_ICRB_IMSGT_SN1NET   2	/* Incoming message from SN1 net */
+#define IIO_ICRB_IMSGT_CRB      3	/* Incoming message from CRB ???  */
+
+/*
+ * values for field initiator.
+ */
+#define IIO_ICRB_INIT_XTALK     0	/* Message originated in xtalk  */
+#define IIO_ICRB_INIT_BTE0      0x1	/* Message originated in BTE 0  */
+#define IIO_ICRB_INIT_SN1NET    0x2	/* Message originated in SN1net */
+#define IIO_ICRB_INIT_CRB       0x3	/* Message originated in CRB ?  */
+#define IIO_ICRB_INIT_BTE1      0x5	/* Message originated in BTE 1  */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * Value of 3 is required by Xbow 1.1
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define		   HUBII_XBOW_CREDIT       3
+#define		   HUBII_XBOW_REV2_CREDIT  4
+
+/*
+ * Number of credits that xtalk devices should use when communicating
+ * with a SHub (depth of SHub's queue).
+ */
+#define HUB_CREDIT 4
+
+/*
+ * Some IIO_PRB fields
+ */
+#define IIO_PRB_MULTI_ERR	(1LL << 63)
+#define IIO_PRB_SPUR_RD		(1LL << 51)
+#define IIO_PRB_SPUR_WR		(1LL << 50)
+#define IIO_PRB_RD_TO		(1LL << 49)
+#define IIO_PRB_ERROR		(1LL << 48)
+
+/*************************************************************************
+
+ Some of the IIO field masks and shifts are defined here, in order
+ to maintain compatibility between SN0 and SN1 code.
+
+**************************************************************************/
+
+/*
+ * ICMR register fields
+ * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
+ * present in SHub)
+ */
+
+#define IIO_ICMR_CRB_VLD_SHFT   20
+#define IIO_ICMR_CRB_VLD_MASK	(0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT    16
+#define IIO_ICMR_FC_CNT_MASK	(0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT     4
+#define IIO_ICMR_C_CNT_MASK	(0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE	(1UL << 52)
+#define IIO_ICMR_CLR_RPPD	(1UL << 13)
+#define IIO_ICMR_CLR_RQPD	(1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks : (IIO_IPDR)
+ XXX present but not needed in bedrock?  See the manual.
+ */
+#define IIO_IPDR_PND    	(1 << 4)
+
+/*
+ * IIO CRB deallocation register field masks: (IIO_ICDR)
+ */
+#define IIO_ICDR_PND    	(1 << 4)
+
+/* 
+ * IO BTE Length/Status (IIO_IBLS) register bit field definitions
+ */
+#define IBLS_BUSY		(0x1UL << 20)
+#define IBLS_ERROR_SHFT		16
+#define IBLS_ERROR		(0x1UL << IBLS_ERROR_SHFT)
+#define IBLS_LENGTH_MASK	0xffff
+
+/*
+ * IO BTE Control/Terminate register (IBCT) register bit field definitions
+ */
+#define IBCT_POISON		(0x1UL << 8)
+#define IBCT_NOTIFY		(0x1UL << 4)
+#define IBCT_ZFIL_MODE		(0x1UL << 0)
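+
+/*
+ * Illustrative sketch (not part of the original interface) of the BTE
+ * programming sequence described above: load source and destination,
+ * write the length (which sets Busy and clears Error), start the
+ * transfer via the control register, then poll Busy. 'base' is assumed
+ * to be a kernel virtual pointer to a BTE register block (e.g. the
+ * mapping of IIO_BASE_BTE0); plain volatile accesses stand in for the
+ * platform's real MMIO accessors.
+ */
+static inline int bte_copy_sketch(volatile u64 *base, u64 src, u64 dest,
+				  u64 lines)
+{
+	/* BTEOFF_* are byte offsets from the length/status register */
+	base[BTEOFF_SRC / 8] = src;	/* cacheline-aligned source */
+	base[BTEOFF_DEST / 8] = dest;	/* cacheline-aligned destination */
+	base[BTEOFF_STAT / 8] = lines & IBLS_LENGTH_MASK;
+	base[BTEOFF_CTRL / 8] = 0;	/* no zfill/notify/poison; initiates */
+	while (base[BTEOFF_STAT / 8] & IBLS_BUSY)
+		;			/* hardware clears Busy when done */
+	return (base[BTEOFF_STAT / 8] & IBLS_ERROR) ? -1 : 0;
+}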
+
+/*
+ * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
+ */
+#define IIEPH1_VALID		(1UL << 44)
+#define IIEPH1_OVERRUN		(1UL << 40)
+#define IIEPH1_ERR_TYPE_SHFT	32
+#define IIEPH1_ERR_TYPE_MASK	0xf
+#define IIEPH1_SOURCE_SHFT	20
+#define IIEPH1_SOURCE_MASK	11
+#define IIEPH1_SUPPL_SHFT	8
+#define IIEPH1_SUPPL_MASK	11
+#define IIEPH1_CMD_SHFT		0
+#define IIEPH1_CMD_MASK		7
+
+#define IIEPH2_TAIL		(1UL << 40)
+#define IIEPH2_ADDRESS_SHFT	0
+#define IIEPH2_ADDRESS_MASK	38
+
+#define IIEPH1_ERR_SHORT_REQ	2
+#define IIEPH1_ERR_SHORT_REPLY	3
+#define IIEPH1_ERR_LONG_REQ	4
+#define IIEPH1_ERR_LONG_REPLY	5
+
+/*
+ * IO Error Clear register bit field definitions
+ */
+#define IECLR_PI1_FWD_INT	(1UL << 31)	/* clear PI1_FORWARD_INT in iidsr */
+#define IECLR_PI0_FWD_INT	(1UL << 30)	/* clear PI0_FORWARD_INT in iidsr */
+#define IECLR_SPUR_RD_HDR	(1UL << 29)	/* clear valid bit in ixss reg */
+#define IECLR_BTE1		(1UL << 18)	/* clear bte error 1 */
+#define IECLR_BTE0		(1UL << 17)	/* clear bte error 0 */
+#define IECLR_CRAZY		(1UL << 16)	/* clear crazy bit in wstat reg */
+#define IECLR_PRB_F		(1UL << 15)	/* clear err bit in PRB_F reg */
+#define IECLR_PRB_E		(1UL << 14)	/* clear err bit in PRB_E reg */
+#define IECLR_PRB_D		(1UL << 13)	/* clear err bit in PRB_D reg */
+#define IECLR_PRB_C		(1UL << 12)	/* clear err bit in PRB_C reg */
+#define IECLR_PRB_B		(1UL << 11)	/* clear err bit in PRB_B reg */
+#define IECLR_PRB_A		(1UL << 10)	/* clear err bit in PRB_A reg */
+#define IECLR_PRB_9		(1UL << 9)	/* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8		(1UL << 8)	/* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0		(1UL << 0)	/* clear err bit in PRB_0 reg */
+
+/*
+ * IIO CRB control register Fields: IIO_ICCR 
+ */
+#define	IIO_ICCR_PENDING	0x10000
+#define	IIO_ICCR_CMD_MASK	0xFF
+#define	IIO_ICCR_CMD_SHFT	7
+#define	IIO_ICCR_CMD_NOP	0x0	/* No Op */
+#define	IIO_ICCR_CMD_WAKE	0x100	/* Reactivate CRB entry and process */
+#define	IIO_ICCR_CMD_TIMEOUT	0x200	/* Make CRB timeout & mark invalid */
+#define	IIO_ICCR_CMD_EJECT	0x400	/* Contents of entry written to memory
+					 * via a WB
+					 */
+#define	IIO_ICCR_CMD_FLUSH	0x800
+
+/*
+ *
+ * CRB Register description.
+ *
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ *
+ * Many of the fields in CRB are status bits used by hardware
+ * for implementation of the protocol. It's very dangerous to
+ * mess around with the CRB registers.
+ *
+ * It's OK to read the CRB registers and try to make sense out of the
+ * fields in CRB.
+ *
+ * Updating a CRB requires all activities in Hub IIO to be quiesced;
+ * otherwise, a write to a CRB could corrupt other CRB entries.
+ * CRBs are here only as a back-door peek at shub IIO's status.
+ * Quiescing implies no DMAs and no PIOs, either directly from the
+ * cpu or from sn0net, which is not something that can be done
+ * easily. So, AVOID updating CRBs.
+ */
+
+/*
+ * Easy access macros for CRBs, all 5 registers (A-E)
+ */
+typedef ii_icrb0_a_u_t icrba_t;
+#define a_sidn		ii_icrb0_a_fld_s.ia_sidn
+#define a_tnum		ii_icrb0_a_fld_s.ia_tnum
+#define a_addr          ii_icrb0_a_fld_s.ia_addr
+#define a_valid         ii_icrb0_a_fld_s.ia_vld
+#define a_iow           ii_icrb0_a_fld_s.ia_iow
+#define a_regvalue	ii_icrb0_a_regval
+
+typedef ii_icrb0_b_u_t icrbb_t;
+#define b_use_old       ii_icrb0_b_fld_s.ib_use_old
+#define b_imsgtype      ii_icrb0_b_fld_s.ib_imsgtype
+#define b_imsg          ii_icrb0_b_fld_s.ib_imsg
+#define b_initiator     ii_icrb0_b_fld_s.ib_init
+#define b_exc           ii_icrb0_b_fld_s.ib_exc
+#define b_ackcnt        ii_icrb0_b_fld_s.ib_ack_cnt
+#define b_resp          ii_icrb0_b_fld_s.ib_resp
+#define b_ack           ii_icrb0_b_fld_s.ib_ack
+#define b_hold          ii_icrb0_b_fld_s.ib_hold
+#define b_wb            ii_icrb0_b_fld_s.ib_wb
+#define b_intvn         ii_icrb0_b_fld_s.ib_intvn
+#define b_stall_ib      ii_icrb0_b_fld_s.ib_stall_ib
+#define b_stall_int     ii_icrb0_b_fld_s.ib_stall__intr
+#define b_stall_bte_0   ii_icrb0_b_fld_s.ib_stall__bte_0
+#define b_stall_bte_1   ii_icrb0_b_fld_s.ib_stall__bte_1
+#define b_error         ii_icrb0_b_fld_s.ib_error
+#define b_ecode         ii_icrb0_b_fld_s.ib_errcode
+#define b_lnetuce       ii_icrb0_b_fld_s.ib_ln_uce
+#define b_mark          ii_icrb0_b_fld_s.ib_mark
+#define b_xerr          ii_icrb0_b_fld_s.ib_xt_err
+#define b_regvalue	ii_icrb0_b_regval
+
+typedef ii_icrb0_c_u_t icrbc_t;
+#define c_suppl         ii_icrb0_c_fld_s.ic_suppl
+#define c_barrop        ii_icrb0_c_fld_s.ic_bo
+#define c_doresp        ii_icrb0_c_fld_s.ic_resprqd
+#define c_gbr           ii_icrb0_c_fld_s.ic_gbr
+#define c_btenum        ii_icrb0_c_fld_s.ic_bte_num
+#define c_cohtrans      ii_icrb0_c_fld_s.ic_ct
+#define c_xtsize        ii_icrb0_c_fld_s.ic_size
+#define c_source        ii_icrb0_c_fld_s.ic_source
+#define c_regvalue	ii_icrb0_c_regval
+
+typedef ii_icrb0_d_u_t icrbd_t;
+#define d_sleep         ii_icrb0_d_fld_s.id_sleep
+#define d_pricnt        ii_icrb0_d_fld_s.id_pr_cnt
+#define d_pripsc        ii_icrb0_d_fld_s.id_pr_psc
+#define d_bteop         ii_icrb0_d_fld_s.id_bte_op
+#define d_bteaddr       ii_icrb0_d_fld_s.id_pa_be	/* id_pa_be fld has 2 names */
+#define d_benable       ii_icrb0_d_fld_s.id_pa_be	/* id_pa_be fld has 2 names */
+#define d_regvalue	ii_icrb0_d_regval
+
+typedef ii_icrb0_e_u_t icrbe_t;
+#define icrbe_ctxtvld   ii_icrb0_e_fld_s.ie_cvld
+#define icrbe_toutvld   ii_icrb0_e_fld_s.ie_tvld
+#define icrbe_context   ii_icrb0_e_fld_s.ie_context
+#define icrbe_timeout   ii_icrb0_e_fld_s.ie_timeout
+#define e_regvalue	ii_icrb0_e_regval
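+
+/*
+ * Illustrative, read-only sketch (heeding the warning above: peek,
+ * never poke). 'crb_b' is assumed to hold a raw value read from
+ * IIO_ICRB_B(n) by platform-specific MMIO code.
+ */
+static inline int crb_errcode_sketch(u64 crb_b)
+{
+	icrbb_t b;
+
+	b.b_regvalue = crb_b;
+	if (!b.b_error)
+		return -1;	/* no error logged in this entry */
+	return b.b_ecode;	/* one of the IIO_ICRB_ECODE_* values */
+}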
+
+/* Number of widgets supported by shub */
+#define HUB_NUM_WIDGET          9
+#define HUB_WIDGET_ID_MIN       0x8
+#define HUB_WIDGET_ID_MAX       0xf
+
+#define HUB_WIDGET_PART_NUM     0xc120
+#define MAX_HUBS_PER_XBOW       2
+
+/* A few more #defines for backwards compatibility */
+#define iprb_t          ii_iprb0_u_t
+#define iprb_regval     ii_iprb0_regval
+#define iprb_mult_err	ii_iprb0_fld_s.i_mult_err
+#define iprb_spur_rd	ii_iprb0_fld_s.i_spur_rd
+#define iprb_spur_wr	ii_iprb0_fld_s.i_spur_wr
+#define iprb_rd_to	ii_iprb0_fld_s.i_rd_to
+#define iprb_ovflow     ii_iprb0_fld_s.i_of_cnt
+#define iprb_error      ii_iprb0_fld_s.i_error
+#define iprb_ff         ii_iprb0_fld_s.i_f
+#define iprb_mode       ii_iprb0_fld_s.i_m
+#define iprb_bnakctr    ii_iprb0_fld_s.i_nb
+#define iprb_anakctr    ii_iprb0_fld_s.i_na
+#define iprb_xtalkctr   ii_iprb0_fld_s.i_c
+
+#define LNK_STAT_WORKING        0x2		/* LLP is working */
+
+#define IIO_WSTAT_ECRAZY	(1ULL << 32)	/* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY	(1ULL << 9)	/* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK  0x7F		/* should be 0xFF?? */
+#define IIO_WSTAT_TXRETRY_SHFT  16
+#define IIO_WSTAT_TXRETRY_CNT(w)	(((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+                          		IIO_WSTAT_TXRETRY_MASK)
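+
+/*
+ * Worked example (illustrative): for a widget status value w, the Tx
+ * retry count is IIO_WSTAT_TXRETRY_CNT(w), i.e. bits 22:16 of the
+ * register under the 0x7F mask above.
+ */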
+
+/* Number of II perf. counters we can multiplex at once */
+
+#define IO_PERF_SETS	32
+
+/* Bit for the widget in inbound access register */
+#define IIO_IIWA_WIDGET(_w)	((u64)(1ULL << (_w)))
+/* Bit for the widget in outbound access register */
+#define IIO_IOWA_WIDGET(_w)	((u64)(1ULL << (_w)))
+
+/* NOTE: The following define assumes that we are going to get
+ * widget numbers from 8 thru F and the device numbers within
+ * widget from 0 thru 7.
+ */
+#define IIO_IIDEM_WIDGETDEV_MASK(w, d)	((u64)(1ULL << (8 * ((w) - 8) + (d))))
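+
+/*
+ * Worked example (illustrative): widget 0xa, device 3 selects bit
+ * 8 * (0xa - 8) + 3 = 19, so IIO_IIDEM_WIDGETDEV_MASK(0xa, 3) == 1ULL << 19.
+ */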
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT    28
+#define IIO_IIDSR_SENT_MASK     0x30000000
+#define IIO_IIDSR_ENB_SHIFT     24
+#define IIO_IIDSR_ENB_MASK      0x01000000
+#define IIO_IIDSR_NODE_SHIFT    9
+#define IIO_IIDSR_NODE_MASK     0x000ff700
+#define IIO_IIDSR_PI_ID_SHIFT   8
+#define IIO_IIDSR_PI_ID_MASK    0x00000100
+#define IIO_IIDSR_LVL_SHIFT     0
+#define IIO_IIDSR_LVL_MASK      0x000000ff
+
+/* Xtalk timeout threshold register (IIO_IXTT) */
+#define IXTT_RRSP_TO_SHFT	55	/* read response timeout */
+#define IXTT_RRSP_TO_MASK	(0x1FULL << IXTT_RRSP_TO_SHFT)
+#define IXTT_RRSP_PS_SHFT	32	/* read response TO prescaler */
+#define IXTT_RRSP_PS_MASK	(0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
+#define IXTT_TAIL_TO_SHFT	0	/* tail timeout counter threshold */
+#define IXTT_TAIL_TO_MASK	(0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
+
+/*
+ * The IO LLP control status register and widget control register
+ */
+
+typedef union hubii_wcr_u {
+	u64 wcr_reg_value;
+	struct {
+		u64 wcr_widget_id:4,	/* Widget identification */
+		 wcr_tag_mode:1,	/* Tag mode */
+		 wcr_rsvd1:8,	/* Reserved */
+		 wcr_xbar_crd:3,	/* LLP crossbar credit */
+		 wcr_f_bad_pkt:1,	/* Force bad llp pkt enable */
+		 wcr_dir_con:1,	/* widget direct connect */
+		 wcr_e_thresh:5,	/* elasticity threshold */
+		 wcr_rsvd:41;	/* unused */
+	} wcr_fields_s;
+} hubii_wcr_t;
+
+#define iwcr_dir_con    wcr_fields_s.wcr_dir_con
+
+/* The structures below are defined to extract and modify the II
+   performance registers. */
+
+/* io_perf_sel allows the caller to specify what tests will be
+   performed */
+
+typedef union io_perf_sel {
+	u64 perf_sel_reg;
+	struct {
+		u64 perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48;
+	} perf_sel_bits;
+} io_perf_sel_t;
+
+/* io_perf_cnt is used to extract the count from the shub registers.
+   Due to hardware problems there is only one counter, not two. */
+
+typedef union io_perf_cnt {
+	u64 perf_cnt;
+	struct {
+		u64 perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
+	} perf_cnt_bits;
+
+} io_perf_cnt_t;
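+
+/*
+ * Illustrative sketch: extracting the single usable 20-bit count from
+ * a raw shub performance register value via the union above.
+ */
+static inline u64 io_perf_cnt_sketch(u64 raw)
+{
+	io_perf_cnt_t c;
+
+	c.perf_cnt = raw;
+	return c.perf_cnt_bits.perf_cnt;
+}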
+
+typedef union iprte_a {
+	u64 entry;
+	struct {
+		u64 i_rsvd_1:3;
+		u64 i_addr:38;
+		u64 i_init:3;
+		u64 i_source:8;
+		u64 i_rsvd:2;
+		u64 i_widget:4;
+		u64 i_to_cnt:5;
+		u64 i_vld:1;
+	} iprte_fields;
+} iprte_a_t;
+
+#endif				/* _ASM_IA64_SN_SHUBIO_H */
diff --git a/arch/ia64/include/asm/sn/simulator.h b/arch/ia64/include/asm/sn/simulator.h
new file mode 100644
index 0000000..c2611f6
--- /dev/null
+++ b/arch/ia64/include/asm/sn/simulator.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SIMULATOR_H
+#define _ASM_IA64_SN_SIMULATOR_H
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV)
+#define SNMAGIC 0xaeeeeeee8badbeefL
+#define IS_MEDUSA()			({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
+
+#define SIMULATOR_SLEEP()		asm("nop.i 0x8beef")
+#define IS_RUNNING_ON_SIMULATOR()	(sn_prom_type)
+#define IS_RUNNING_ON_FAKE_PROM()	(sn_prom_type == 2)
+extern int sn_prom_type;		/* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+#else
+#define IS_MEDUSA()			0
+#define SIMULATOR_SLEEP()
+#define IS_RUNNING_ON_SIMULATOR()	0
+#endif
+
+#endif /* _ASM_IA64_SN_SIMULATOR_H */
diff --git a/arch/ia64/include/asm/sn/sn2/sn_hwperf.h b/arch/ia64/include/asm/sn/sn2/sn_hwperf.h
new file mode 100644
index 0000000..e61ebac
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn2/sn_hwperf.h
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring
+ * SGI Altix node and router hardware
+ *
+ * Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004
+ */
+
+#ifndef SN_HWPERF_H
+#define SN_HWPERF_H
+
+/*
+ * object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO
+ * return an array of these. Do not change this without also
+ * changing the corresponding SAL code.
+ */
+#define SN_HWPERF_MAXSTRING		128
+struct sn_hwperf_object_info {
+	u32 id;
+	union {
+		struct {
+			u64 this_part:1;
+			u64 is_shared:1;
+		} fields;
+		struct {
+			u64 flags;
+			u64 reserved;
+		} b;
+	} f;
+	char name[SN_HWPERF_MAXSTRING];
+	char location[SN_HWPERF_MAXSTRING];
+	u32 ports;
+};
+
+#define sn_hwp_this_part	f.fields.this_part
+#define sn_hwp_is_shared	f.fields.is_shared
+#define sn_hwp_flags		f.b.flags
+
+/* macros for object classification */
+#define SN_HWPERF_IS_NODE(x)		((x) && strstr((x)->name, "SHub"))
+#define SN_HWPERF_IS_NODE_SHUB2(x)	((x) && strstr((x)->name, "SHub 2."))
+#define SN_HWPERF_IS_IONODE(x)		((x) && strstr((x)->name, "TIO"))
+#define SN_HWPERF_IS_NL3ROUTER(x)	((x) && strstr((x)->name, "NL3Router"))
+#define SN_HWPERF_IS_NL4ROUTER(x)	((x) && strstr((x)->name, "NL4Router"))
+#define SN_HWPERF_IS_OLDROUTER(x)	((x) && strstr((x)->name, "Router"))
+#define SN_HWPERF_IS_ROUTER(x)		(SN_HWPERF_IS_NL3ROUTER(x) || 		\
+					 	SN_HWPERF_IS_NL4ROUTER(x) || 	\
+					 	SN_HWPERF_IS_OLDROUTER(x))
+#define SN_HWPERF_FOREIGN(x)		((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared)
+#define SN_HWPERF_SAME_OBJTYPE(x,y)	((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\
+					(SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\
+					(SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y)))
+
+/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */
+struct sn_hwperf_port_info {
+	u32 port;
+	u32 conn_id;
+	u32 conn_port;
+};
+
+/* for HWPERF_{GET,SET}_MMRS */
+struct sn_hwperf_data {
+	u64 addr;
+	u64 data;
+};
+
+/* user ioctl() argument, see below */
+struct sn_hwperf_ioctl_args {
+        u64 arg;		/* argument, usually an object id */
+        u64 sz;                 /* size of transfer */
+        void *ptr;              /* pointer to source/target */
+        u32 v0;			/* second return value */
+};
+
+/*
+ * For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE,
+ * sn_hwperf_ioctl_args.arg can be used to specify a CPU on which
+ * to call SAL, and whether to use an interprocessor interrupt
+ * or task migration in order to do so. If the CPU specified is
+ * SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used.
+ */
+#define SN_HWPERF_ARG_ANY_CPU		0x7fffffffUL
+#define SN_HWPERF_ARG_CPU_MASK		0x7fffffff00000000ULL
+#define SN_HWPERF_ARG_USE_IPI_MASK	0x8000000000000000ULL
+#define SN_HWPERF_ARG_OBJID_MASK	0x00000000ffffffffULL
+
+/* 
+ * ioctl requests on the "sn_hwperf" misc device that call SAL.
+ */
+#define SN_HWPERF_OP_MEM_COPYIN		0x1000
+#define SN_HWPERF_OP_MEM_COPYOUT	0x2000
+#define SN_HWPERF_OP_MASK		0x0fff
+
+/*
+ * Determine mem requirement.
+ * arg	don't care
+ * sz	8
+ * p	pointer to u64 integer
+ */
+#define	SN_HWPERF_GET_HEAPSIZE		1
+
+/*
+ * Install mem for SAL drvr
+ * arg	don't care
+ * sz	sizeof buffer pointed to by p
+ * p	pointer to buffer for scratch area
+ */
+#define SN_HWPERF_INSTALL_HEAP		2
+
+/*
+ * Determine number of objects
+ * arg	don't care
+ * sz	8
+ * p	pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_COUNT		(10|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Determine object "distance", relative to a cpu. This operation can
+ * execute on a designated logical cpu number, using either an IPI or
+ * via task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg	bitmap of IPI flag, cpu number and object id
+ * sz	8
+ * p	pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_DISTANCE	(11|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Enumerate objects. Special case if sz == 8, returns the required
+ * buffer size.
+ * arg	don't care
+ * sz	sizeof buffer pointed to by p
+ * p	pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_ENUM_OBJECTS		(12|SN_HWPERF_OP_MEM_COPYOUT)
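+
+/*
+ * Illustrative user-space sketch of the two-step enumeration (sz == 8
+ * first returns the required buffer size). The device path
+ * /dev/sn_hwperf and the convention of passing the op as the ioctl
+ * command with a struct sn_hwperf_ioctl_args pointer are assumptions,
+ * not guaranteed by this header:
+ *
+ *	struct sn_hwperf_ioctl_args a = { 0 };
+ *	u64 sz;
+ *	int fd = open("/dev/sn_hwperf", O_RDONLY);
+ *
+ *	a.sz = sizeof(sz);		// special case: query size
+ *	a.ptr = &sz;
+ *	ioctl(fd, SN_HWPERF_ENUM_OBJECTS, &a);
+ *
+ *	a.sz = sz;			// fetch the object array
+ *	a.ptr = malloc(sz);
+ *	ioctl(fd, SN_HWPERF_ENUM_OBJECTS, &a);
+ */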
+
+/*
+ * Enumerate NumaLink ports for an object. Special case if sz == 8,
+ * returns the required buffer size.
+ * arg	object id
+ * sz	sizeof buffer pointed to by p
+ * p	pointer to array of struct sn_hwperf_port_info
+ */
+#define SN_HWPERF_ENUM_PORTS		(13|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * SET/GET memory mapped registers. These operations can execute
+ * on a designated logical cpu number, using either an IPI or via
+ * task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg	bitmap of ipi flag, cpu number and object id
+ * sz	sizeof buffer pointed to by p
+ * p	pointer to array of struct sn_hwperf_data
+ */
+#define SN_HWPERF_SET_MMRS		(14|SN_HWPERF_OP_MEM_COPYIN)
+#define SN_HWPERF_GET_MMRS		(15|SN_HWPERF_OP_MEM_COPYOUT| \
+					    SN_HWPERF_OP_MEM_COPYIN)
+/*
+ * Lock a shared object
+ * arg	object id
+ * sz	don't care
+ * p	don't care
+ */
+#define SN_HWPERF_ACQUIRE		16
+
+/*
+ * Unlock a shared object
+ * arg	object id
+ * sz	don't care
+ * p	don't care
+ */
+#define SN_HWPERF_RELEASE		17
+
+/*
+ * Break a lock on a shared object
+ * arg	object id
+ * sz	don't care
+ * p	don't care
+ */
+#define SN_HWPERF_FORCE_RELEASE		18
+
+/*
+ * ioctl requests on "sn_hwperf" that do not call SAL
+ */
+
+/*
+ * get cpu info as an array of struct sn_hwperf_object_info.
+ * id is logical CPU number, name is description, location
+ * is geoid (e.g. 001c04#1c). Special case if sz == 8,
+ * returns the required buffer size.
+ *
+ * arg	don't care
+ * sz	sizeof buffer pointed to by p
+ * p	pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_GET_CPU_INFO		(100|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given an object id, return its node number (aka cnode).
+ * arg	object id
+ * sz	8
+ * p	pointer to u64 integer
+ */
+#define SN_HWPERF_GET_OBJ_NODE		(101|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given a node number (cnode), return its nasid.
+ * arg	ordinal node number (aka cnodeid)
+ * sz	8
+ * p	pointer to u64 integer
+ */
+#define SN_HWPERF_GET_NODE_NASID	(102|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given a node id, determine the id of the nearest node with CPUs
+ * and the id of the nearest node that has memory. The argument
+ * node would normally be a "headless" node, e.g. an "IO node".
+ * Return 0 on success.
+ */
+extern int sn_hwperf_get_nearest_node(cnodeid_t node,
+	cnodeid_t *near_mem, cnodeid_t *near_cpu);
+
+/* return codes */
+#define SN_HWPERF_OP_OK			0
+#define SN_HWPERF_OP_NOMEM		1
+#define SN_HWPERF_OP_NO_PERM		2
+#define SN_HWPERF_OP_IO_ERROR		3
+#define SN_HWPERF_OP_BUSY		4
+#define SN_HWPERF_OP_RECONFIGURE	253
+#define SN_HWPERF_OP_INVAL		254
+
+int sn_topology_open(struct inode *inode, struct file *file);
+int sn_topology_release(struct inode *inode, struct file *file);
+#endif				/* SN_HWPERF_H */
diff --git a/arch/ia64/include/asm/sn/sn_cpuid.h b/arch/ia64/include/asm/sn/sn_cpuid.h
new file mode 100644
index 0000000..a676dd9
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_cpuid.h
@@ -0,0 +1,132 @@
+/* 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_SN_CPUID_H
+#define _ASM_IA64_SN_SN_CPUID_H
+
+#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/intrinsics.h>
+
+
+/*
+ * Functions for converting between cpuids, nodeids and NASIDs.
+ * 
+ * These are for SGI platforms only.
+ *
+ */
+
+
+
+
+/*
+ *  Definitions of terms (these definitions are for IA64 ONLY. Other architectures
+ *  use cpuid/cpunum quite differently):
+ *
+ *	   CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
+ *		the cpu. The value cpuid has no significance on IA64 other than
+ *		the boot cpu is 0.
+ *			smp_processor_id() returns the cpuid of the current cpu.
+ *
+ * 	   CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
+ *		This is the same as 31:24 of the processor LID register
+ *			hard_smp_processor_id()- cpu_physical_id of current processor
+ *			cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
+ *			cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid> 
+ *				* not very efficient - don't use in performance-critical code
+ *
+ *         SLICE - a number in the range of 0 - 3 (typically) that represents the
+ *		cpu number on a brick.
+ *
+ *	   SUBNODE - (almost obsolete) the number of the FSB that a cpu is
+ *		connected to. This is also the same as the PI number. Usually 0 or 1.
+ *
+ *	NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no 
+ *	significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
+ *
+ *
+ * The macros convert between cpu physical ids & slice/nasid/cnodeid.
+ * These terms are described below:
+ *
+ *
+ * Brick
+ *          -----   -----           -----   -----       CPU
+ *          | 0 |   | 1 |           | 0 |   | 1 |       SLICE
+ *          -----   -----           -----   -----
+ *            |       |               |       |
+ *            |       |               |       |
+ *          0 |       | 2           0 |       | 2       FSB SLOT
+ *             -------                 -------  
+ *                |                       |
+ *                |                       |
+ *                |                       |
+ *             ------------      -------------
+ *             |          |      |           |
+ *             |    SHUB  |      |   SHUB    |        NASID   (0..MAX_NASIDS)
+ *             |          |----- |           |        CNODEID (0..num_compact_nodes-1)
+ *             |          |      |           |
+ *             |          |      |           |
+ *             ------------      -------------
+ *                   |                 |
+ *                           
+ *
+ */
+
+#define get_node_number(addr)			NASID_GET(addr)
+
+/*
+ * NOTE: on non-MP systems, only cpuid 0 exists
+ */
+
+extern short physical_node_map[];	/* indexed by nasid to get cnode */
+
+/*
+ * Macros for retrieving info about current cpu
+ */
+#define get_nasid()	(sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
+#define get_subnode()	(sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
+#define get_slice()	(sn_nodepda->phys_cpuid[smp_processor_id()].slice)
+#define get_cnode()	(sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
+#define get_sapicid()	((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
+
+/*
+ * Macros for retrieving info about an arbitrary cpu
+ *	cpuid - logical cpu id
+ */
+#define cpuid_to_nasid(cpuid)		(sn_nodepda->phys_cpuid[cpuid].nasid)
+#define cpuid_to_subnode(cpuid)		(sn_nodepda->phys_cpuid[cpuid].subnode)
+#define cpuid_to_slice(cpuid)		(sn_nodepda->phys_cpuid[cpuid].slice)
+
+
+/*
+ * Don't use the following in performance-critical code. They require scans
+ * of potentially large tables.
+ */
+extern int nasid_slice_to_cpuid(int, int);
+
+/*
+ * cnodeid_to_nasid - convert a cnodeid to a NASID
+ */
+#define cnodeid_to_nasid(cnodeid)	(sn_cnodeid_to_nasid[cnodeid])
+ 
+/*
+ * nasid_to_cnodeid - convert a NASID to a cnodeid
+ */
+#define nasid_to_cnodeid(nasid)		(physical_node_map[nasid])
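+
+/*
+ * Illustrative sketch (not part of the original interface): the two
+ * tables above are inverses, so a valid cnodeid survives a round trip
+ * through its nasid.
+ */
+static inline int sn_cnodeid_roundtrip_sketch(int cnodeid)
+{
+	return nasid_to_cnodeid(cnodeid_to_nasid(cnodeid)) == cnodeid;
+}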
+
+/*
+ * partition_coherence_id - get the coherence ID of the current partition
+ */
+extern u8 sn_coherency_id;
+#define partition_coherence_id()	(sn_coherency_id)
+
+#endif /* _ASM_IA64_SN_SN_CPUID_H */
+
diff --git a/arch/ia64/include/asm/sn/sn_feature_sets.h b/arch/ia64/include/asm/sn/sn_feature_sets.h
new file mode 100644
index 0000000..8e83ac1
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_feature_sets.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_IA64_SN_FEATURE_SETS_H
+#define _ASM_IA64_SN_FEATURE_SETS_H
+
+/*
+ * SN PROM Features
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005-2006 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+/* --------------------- PROM Features -----------------------------*/
+extern int sn_prom_feature_available(int id);
+
+#define MAX_PROM_FEATURE_SETS			2
+
+/*
+ * The following defines features that may or may not be supported by the
+ * current PROM. The OS uses sn_prom_feature_available(feature) to test for
+ * the presence of a PROM feature. Down rev (old) PROMs will always test
+ * "false" for new features.
+ *
+ * Use:
+ * 		if (sn_prom_feature_available(PRF_XXX))
+ * 			...
+ */
+
+#define PRF_PAL_CACHE_FLUSH_SAFE	0
+#define PRF_DEVICE_FLUSH_LIST		1
+#define PRF_HOTPLUG_SUPPORT		2
+#define PRF_CPU_DISABLE_SUPPORT		3
+
+/* --------------------- OS Features -------------------------------*/
+
+/*
+ * The following defines OS features that are optionally present in
+ * the operating system.
+ * During boot, PROM is notified of these features via a series of calls:
+ *
+ * 		ia64_sn_set_os_feature(feature1);
+ *
+ * Once enabled, a feature cannot be disabled.
+ *
+ * By default, features are disabled unless explicitly enabled.
+ *
+ * These defines must be kept in sync with the corresponding
+ * PROM definitions in feature_sets.h.
+ */
+#define  OSF_MCA_SLV_TO_OS_INIT_SLV	0
+#define  OSF_FEAT_LOG_SBES		1
+#define  OSF_ACPI_ENABLE		2
+#define  OSF_PCISEGMENT_ENABLE		3
+
+
+#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
new file mode 100644
index 0000000..1f5ff47
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -0,0 +1,1233 @@
+#ifndef _ASM_IA64_SN_SN_SAL_H
+#define _ASM_IA64_SN_SN_SAL_H
+
+/*
+ * System Abstraction Layer definitions for IA64
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+#include <asm/sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/shub_mmr.h>
+
+// SGI Specific Calls
+#define  SN_SAL_POD_MODE                           0x02000001
+#define  SN_SAL_SYSTEM_RESET                       0x02000002
+#define  SN_SAL_PROBE                              0x02000003
+#define  SN_SAL_GET_MASTER_NASID                   0x02000004
+#define	 SN_SAL_GET_KLCONFIG_ADDR		   0x02000005
+#define  SN_SAL_LOG_CE				   0x02000006
+#define  SN_SAL_REGISTER_CE			   0x02000007
+#define  SN_SAL_GET_PARTITION_ADDR		   0x02000009
+#define  SN_SAL_XP_ADDR_REGION			   0x0200000f
+#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL		   0x02000010
+#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL		   0x02000011
+#define  SN_SAL_PRINT_ERROR			   0x02000012
+#define  SN_SAL_REGISTER_PMI_HANDLER		   0x02000014
+#define  SN_SAL_SET_ERROR_HANDLING_FEATURES	   0x0200001a	// reentrant
+#define  SN_SAL_GET_FIT_COMPT			   0x0200001b	// reentrant
+#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
+#define  SN_SAL_GET_SN_INFO                        0x0200001e
+#define  SN_SAL_CONSOLE_PUTC                       0x02000021
+#define  SN_SAL_CONSOLE_GETC                       0x02000022
+#define  SN_SAL_CONSOLE_PUTS                       0x02000023
+#define  SN_SAL_CONSOLE_GETS                       0x02000024
+#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
+#define  SN_SAL_CONSOLE_POLL                       0x02000026
+#define  SN_SAL_CONSOLE_INTR                       0x02000027
+#define  SN_SAL_CONSOLE_PUTB			   0x02000028
+#define  SN_SAL_CONSOLE_XMIT_CHARS		   0x0200002a
+#define  SN_SAL_CONSOLE_READC			   0x0200002b
+#define  SN_SAL_SYSCTL_OP			   0x02000030
+#define  SN_SAL_SYSCTL_MODID_GET	           0x02000031
+#define  SN_SAL_SYSCTL_GET                         0x02000032
+#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
+#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
+#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
+#define  SN_SAL_BUS_CONFIG		   	   0x02000037
+#define  SN_SAL_SYS_SERIAL_GET			   0x02000038
+#define  SN_SAL_PARTITION_SERIAL_GET		   0x02000039
+#define  SN_SAL_SYSCTL_PARTITION_GET               0x0200003a
+#define  SN_SAL_SYSTEM_POWER_DOWN		   0x0200003b
+#define  SN_SAL_GET_MASTER_BASEIO_NASID		   0x0200003c
+#define  SN_SAL_COHERENCE                          0x0200003d
+#define  SN_SAL_MEMPROTECT                         0x0200003e
+#define  SN_SAL_SYSCTL_FRU_CAPTURE		   0x0200003f
+
+#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP		   0x02000042	// reentrant
+#define	 SN_SAL_IROUTER_OP			   0x02000043
+#define  SN_SAL_SYSCTL_EVENT                       0x02000044
+#define  SN_SAL_IOIF_INTERRUPT			   0x0200004a
+#define  SN_SAL_HWPERF_OP			   0x02000050   // lock
+#define  SN_SAL_IOIF_ERROR_INTERRUPT		   0x02000051
+#define  SN_SAL_IOIF_PCI_SAFE			   0x02000052
+#define  SN_SAL_IOIF_SLOT_ENABLE		   0x02000053
+#define  SN_SAL_IOIF_SLOT_DISABLE		   0x02000054
+#define  SN_SAL_IOIF_GET_HUBDEV_INFO		   0x02000055
+#define  SN_SAL_IOIF_GET_PCIBUS_INFO		   0x02000056
+#define  SN_SAL_IOIF_GET_PCIDEV_INFO		   0x02000057
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058	// deprecated
+#define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST	   0x0200005a
+
+#define SN_SAL_IOIF_INIT			   0x0200005f
+#define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
+#define SN_SAL_BTE_RECOVER			   0x02000061
+#define SN_SAL_RESERVED_DO_NOT_USE		   0x02000062
+#define SN_SAL_IOIF_GET_PCI_TOPOLOGY		   0x02000064
+
+#define  SN_SAL_GET_PROM_FEATURE_SET		   0x02000065
+#define  SN_SAL_SET_OS_FEATURE_SET		   0x02000066
+#define  SN_SAL_INJECT_ERROR			   0x02000067
+#define  SN_SAL_SET_CPU_NUMBER			   0x02000068
+
+#define  SN_SAL_KERNEL_LAUNCH_EVENT		   0x02000069
+#define  SN_SAL_WATCHLIST_ALLOC			   0x02000070
+#define  SN_SAL_WATCHLIST_FREE			   0x02000071
+
+/*
+ * Service-specific constants
+ */
+
+/* Console interrupt manipulation */
+	/* action codes */
+#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
+#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2	/* retrieve the interrupt status */
+	/* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT	1	/* output interrupt */
+#define SAL_CONSOLE_INTR_RECV	2	/* input interrupt */
+
+/* interrupt handling */
+#define SAL_INTR_ALLOC		1
+#define SAL_INTR_FREE		2
+#define SAL_INTR_REDIRECT	3
+
+/*
+ * operations available on the generic SN_SAL_SYSCTL_OP
+ * runtime service
+ */
+#define SAL_SYSCTL_OP_IOBOARD		0x0001  /*  retrieve board type */
+#define SAL_SYSCTL_OP_TIO_JLCK_RST      0x0002  /* issue TIO clock reset */
+
+/*
+ * IRouter (i.e. generalized system controller) operations
+ */
+#define SAL_IROUTER_OPEN	0	/* open a subchannel */
+#define SAL_IROUTER_CLOSE	1	/* close a subchannel */
+#define SAL_IROUTER_SEND	2	/* send part of an IRouter packet */
+#define SAL_IROUTER_RECV	3	/* receive part of an IRouter packet */
+#define SAL_IROUTER_INTR_STATUS	4	/* check the interrupt status for
+					 * an open subchannel
+					 */
+#define SAL_IROUTER_INTR_ON	5	/* enable an interrupt */
+#define SAL_IROUTER_INTR_OFF	6	/* disable an interrupt */
+#define SAL_IROUTER_INIT	7	/* initialize IRouter driver */
+
+/* IRouter interrupt mask bits */
+#define SAL_IROUTER_INTR_XMIT	SAL_CONSOLE_INTR_XMIT
+#define SAL_IROUTER_INTR_RECV	SAL_CONSOLE_INTR_RECV
+
+/*
+ * Error Handling Features
+ */
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1	// obsolete
+#define SAL_ERR_FEAT_LOG_SBES			0x2	// obsolete
+#define SAL_ERR_FEAT_MFR_OVERRIDE		0x4
+#define SAL_ERR_FEAT_SBE_THRESHOLD		0xffff0000
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES	1
+#define SALRET_OK		0
+#define SALRET_NOT_IMPLEMENTED	(-1)
+#define SALRET_INVALID_ARG	(-2)
+#define SALRET_ERROR		(-3)
+
+#define SN_SAL_FAKE_PROM			   0x02009999
+
+/**
+ * sn_sal_rev - get the SGI SAL revision number
+ *
+ * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor)
+ * fields.  This routine simply extracts the major and minor values and
+ * presents them in a u32 format.
+ *
+ * For example, version 4.05 would be represented as 0x0405.
+ */
+static inline u32
+sn_sal_rev(void)
+{
+	struct ia64_sal_systab *systab = __va(efi.sal_systab);
+
+	return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
+}
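+
+/*
+ * Illustrative sketch (not part of the upstream header; the helper name
+ * is hypothetical): with the 0xMMmm encoding above, a caller could gate
+ * behavior on a minimum PROM revision.
+ */
+#if 0	/* example only */
+static inline int
+sn_sal_rev_at_least(u32 wanted)
+{
+	/* e.g. wanted == 0x0405 requires SGI SAL rev 4.05 or later */
+	return sn_sal_rev() >= wanted;
+}
+#endif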
+
+/*
+ * Returns the master console nasid; if the call fails, returns an
+ * illegal value.
+ */
+static inline u64
+ia64_sn_get_console_nasid(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status < 0)
+		return ret_stuff.status;
+
+	/* Master console nasid is in 'v0' */
+	return ret_stuff.v0;
+}
+
+/*
+ * Returns the master baseio nasid; if the call fails, returns an
+ * illegal value.
+ */
+static inline u64
+ia64_sn_get_master_baseio_nasid(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status < 0)
+		return ret_stuff.status;
+
+	/* Master baseio nasid is in 'v0' */
+	return ret_stuff.v0;
+}
+
+static inline void *
+ia64_sn_get_klconfig_addr(nasid_t nasid)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
+	return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
+}
+
+/*
+ * Reads the next console character into *ch.  Returns the SAL call status.
+ */
+static inline u64
+ia64_sn_console_getc(int *ch)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
+
+	/* character is in 'v0' */
+	*ch = (int)ret_stuff.v0;
+
+	return ret_stuff.status;
+}
+
+/*
+ * Read a character from the SAL console device, after a previous interrupt
+ * or poll operation has indicated that a character is available to be
+ * read.
+ */
+static inline u64
+ia64_sn_console_readc(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
+
+	/* character is in 'v0' */
+	return ret_stuff.v0;
+}
+
+/*
+ * Sends the given character to the console.
+ */
+static inline u64
+ia64_sn_console_putc(char ch)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (u64)ch, 0, 0, 0, 0, 0, 0);
+
+	return ret_stuff.status;
+}
+
+/*
+ * Sends the given buffer to the console.
+ */
+static inline u64
+ia64_sn_console_putb(const char *buf, int len)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0; 
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (u64)buf, (u64)len, 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status == 0)
+		return ret_stuff.v0;
+	return (u64)0;
+}
+
+/*
+ * Print a platform error record
+ */
+static inline u64
+ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (u64)hook, (u64)rec, 0, 0, 0, 0, 0);
+
+	return ret_stuff.status;
+}
+
+/*
+ * Check for Platform errors
+ */
+static inline u64
+ia64_sn_plat_cpei_handler(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
+
+	return ret_stuff.status;
+}
+
+/*
+ * Set Error Handling Features	(Obsolete)
+ */
+static inline u64
+ia64_sn_plat_set_error_handling_features(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
+		SAL_ERR_FEAT_LOG_SBES,
+		0, 0, 0, 0, 0, 0);
+
+	return ret_stuff.status;
+}
+
+/*
+ * Checks for console input.
+ */
+static inline u64
+ia64_sn_console_check(int *result)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
+
+	/* result is in 'v0' */
+	*result = (int)ret_stuff.v0;
+
+	return ret_stuff.status;
+}
+
+/*
+ * Checks console interrupt status
+ */
+static inline u64
+ia64_sn_console_intr_status(void)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+		 0, SAL_CONSOLE_INTR_STATUS,
+		 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status == 0)
+		return ret_stuff.v0;
+
+	return 0;
+}
+
+/*
+ * Enable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_enable(u64 intr)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+		 intr, SAL_CONSOLE_INTR_ON,
+		 0, 0, 0, 0, 0);
+}
+
+/*
+ * Disable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_disable(u64 intr)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+		 intr, SAL_CONSOLE_INTR_OFF,
+		 0, 0, 0, 0, 0);
+}
+
+/*
+ * Sends a character buffer to the console asynchronously.
+ */
+static inline u64
+ia64_sn_console_xmit_chars(char *buf, int len)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
+		 (u64)buf, (u64)len,
+		 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status == 0)
+		return ret_stuff.v0;
+
+	return 0;
+}
+
+/*
+ * Returns the iobrick module Id
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
+
+	/* result is in 'v0' */
+	*result = (int)ret_stuff.v0;
+
+	return ret_stuff.status;
+}
+
+/**
+ * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
+ *
+ * SN_SAL_POD_MODE actually takes an argument, but it's always
+ * 0 when we call it from the kernel, so we don't have to expose
+ * it to the caller.
+ */
+static inline u64
+ia64_sn_pod_mode(void)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+	if (isrv.status)
+		return 0;
+	return isrv.v0;
+}
+
+/**
+ * ia64_sn_probe_mem - read from memory safely
+ * @addr: address to probe
+ * @size: number of bytes to read (1,2,4,8)
+ * @data_ptr: address to store value read by probe (-1 returned if probe fails)
+ *
+ * Call into the SAL to do a memory read.  If the read generates a machine
+ * check, this routine will recover gracefully and return -1 to the caller.
+ * @addr is usually a kernel virtual address in uncached space (i.e. the
+ * address starts with 0xc), but if called in physical mode, @addr should
+ * be a physical address.
+ *
+ * Return values:
+ *  0 - probe successful
+ *  1 - probe failed (generated MCA)
+ *  2 - Bad arg
+ * <0 - PAL error
+ */
+static inline u64
+ia64_sn_probe_mem(long addr, long size, void *data_ptr)
+{
+	struct ia64_sal_retval isrv;
+
+	SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
+
+	if (data_ptr) {
+		switch (size) {
+		case 1:
+			*((u8*)data_ptr) = (u8)isrv.v0;
+			break;
+		case 2:
+			*((u16*)data_ptr) = (u16)isrv.v0;
+			break;
+		case 4:
+			*((u32*)data_ptr) = (u32)isrv.v0;
+			break;
+		case 8:
+			*((u64*)data_ptr) = (u64)isrv.v0;
+			break;
+		default:
+			isrv.status = 2;
+		}
+	}
+	return isrv.status;
+}
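+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * probe a possibly-absent word without risking an MCA, per the kernel-doc
+ * above.  -EIO is just one way a caller might report the failure.
+ */
+#if 0	/* example only */
+static int
+read_word_safely(long uncached_vaddr, u32 *out)
+{
+	u32 val;
+
+	if (ia64_sn_probe_mem(uncached_vaddr, 4, &val))
+		return -EIO;	/* probe MCA'd, bad size, or PAL error */
+	*out = val;
+	return 0;
+}
+#endif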
+
+/*
+ * Retrieve the system serial number as an ASCII string.
+ */
+static inline u64
+ia64_sn_sys_serial_get(char *buf)
+{
+	struct ia64_sal_retval ret_stuff;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+extern char sn_system_serial_number_string[];
+extern u64 sn_partition_serial_number;
+
+static inline char *
+sn_system_serial_number(void)
+{
+	if (!sn_system_serial_number_string[0])
+		ia64_sn_sys_serial_get(sn_system_serial_number_string);
+	return sn_system_serial_number_string;
+}
+
+/*
+ * Returns a unique id number for this system and partition (suitable for
+ * use with license managers), based in part on the system serial number.
+ */
+static inline u64
+ia64_sn_partition_serial_get(void)
+{
+	struct ia64_sal_retval ret_stuff;
+	ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
+				   0, 0, 0, 0, 0, 0);
+	if (ret_stuff.status != 0)
+		return 0;
+	return ret_stuff.v0;
+}
+
+static inline u64
+sn_partition_serial_number_val(void)
+{
+	if (unlikely(sn_partition_serial_number == 0))
+		sn_partition_serial_number = ia64_sn_partition_serial_get();
+	return sn_partition_serial_number;
+}
+
+/*
+ * Returns the partition id of the nasid passed in as an argument,
+ * or INVALID_PARTID if the partition id cannot be retrieved.
+ */
+static inline partid_t
+ia64_sn_sysctl_partition_get(nasid_t nasid)
+{
+	struct ia64_sal_retval ret_stuff;
+	SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
+		0, 0, 0, 0, 0, 0);
+	if (ret_stuff.status != 0)
+		return -1;
+	return ((partid_t)ret_stuff.v0);
+}
+
+/*
+ * Returns the physical address of the partition's reserved page through
+ * an iterative number of calls.
+ *
+ * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
+ * set to the nasid of the partition whose reserved page's address is
+ * being sought.
+ * On subsequent calls, pass in the values that were returned by the
+ * previous call.
+ *
+ * While the return status equals SALRET_MORE_PASSES, keep calling
+ * this function after first copying 'len' bytes starting at 'addr'
+ * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
+ * be the physical address of the partition's reserved page. If the
+ * return status equals neither of these, an error has occurred.
+ */
+static inline s64
+sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+	struct ia64_sal_retval rv;
+	ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
+				   *addr, buf, *len, 0, 0, 0);
+	*cookie = rv.v0;
+	*addr = rv.v1;
+	*len = rv.v2;
+	return rv.status;
+}
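+
+/*
+ * Illustrative sketch (hypothetical caller): drive the SALRET_MORE_PASSES
+ * protocol documented above.  Sizing 'buf' and copying the intermediate
+ * data are the caller's responsibility; only the call sequence is shown.
+ */
+#if 0	/* example only */
+static s64
+get_reserved_page_pa(nasid_t nasid, u64 buf, u64 *rp_pa)
+{
+	u64 cookie = 0, addr = (u64)nasid, len = 0;
+	s64 status;
+
+	while ((status = sn_partition_reserved_page_pa(buf, &cookie, &addr,
+						       &len)) ==
+						SALRET_MORE_PASSES) {
+		/* copy 'len' bytes starting at 'addr' into 'buf' here */
+	}
+	if (status == SALRET_OK)
+		*rp_pa = addr;
+	return status;
+}
+#endif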
+
+/*
+ * Register or unregister a physical address range being referenced across
+ * a partition boundary for which certain SAL errors should be scanned for,
+ * cleaned up and ignored.  This is of value for kernel partitioning code only.
+ * Values for the operation argument:
+ *	1 = register this address range with SAL
+ *	0 = unregister this address range with SAL
+ * 
+ * SAL maintains a reference count on an address range in case it is registered
+ * multiple times.
+ * 
+ * On success, returns the reference count of the address range after the SAL
+ * call has performed the current registration/unregistration.  Returns a
+ * negative value if an error occurred.
+ */
+static inline int
+sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
+{
+	struct ia64_sal_retval ret_stuff;
+	ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
+			 (u64)operation, 0, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+/*
+ * Register or unregister an instruction range for which SAL errors should
+ * be ignored.  If an error occurs while in the registered range, SAL jumps
+ * to return_addr after ignoring the error.  Values for the operation argument:
+ *	1 = register this instruction range with SAL
+ *	0 = unregister this instruction range with SAL
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
+			 int virtual, int operation)
+{
+	struct ia64_sal_retval ret_stuff;
+	u64 call;
+	if (virtual) {
+		call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
+	} else {
+		call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
+	}
+	ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
+			 (u64)1, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+/*
+ * Register or unregister a function to handle a PMI received by a CPU.
+ * Before calling the registered handler, SAL sets r1 to the value that
+ * was passed in as the global_pointer.
+ *
+ * If the handler pointer is NULL, then the currently registered handler
+ * will be unregistered.
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_pmi_handler(u64 handler, u64 global_pointer)
+{
+	struct ia64_sal_retval ret_stuff;
+	ia64_sal_oemcall(&ret_stuff, SN_SAL_REGISTER_PMI_HANDLER, handler,
+			 global_pointer, 0, 0, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+/*
+ * Change or query the coherence domain for this partition. Each cpu-based
+ * nasid is represented by a bit in an array of 64-bit words:
+ *      0 = not in this partition's coherency domain
+ *      1 = in this partition's coherency domain
+ *
+ * It is not possible for the local system's nasids to be removed from
+ * the coherency domain.  Purpose of the domain arguments:
+ *      new_domain = set the coherence domain to the given nasids
+ *      old_domain = return the current coherence domain
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_change_coherence(u64 *new_domain, u64 *old_domain)
+{
+	struct ia64_sal_retval ret_stuff;
+	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
+				(u64)old_domain, 0, 0, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+/*
+ * Change memory access protections for a physical address range.
+ * nasid_array is not used on Altix, but may be in future architectures.
+ * Available memory protection access classes are defined after the function.
+ */
+static inline int
+sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
+				(u64)nasid_array, perms, 0, 0, 0);
+	return ret_stuff.status;
+}
+#define SN_MEMPROT_ACCESS_CLASS_0		0x14a080
+#define SN_MEMPROT_ACCESS_CLASS_1		0x2520c2
+#define SN_MEMPROT_ACCESS_CLASS_2		0x14a1ca
+#define SN_MEMPROT_ACCESS_CLASS_3		0x14a290
+#define SN_MEMPROT_ACCESS_CLASS_6		0x084080
+#define SN_MEMPROT_ACCESS_CLASS_7		0x021080
+
+/*
+ * Turns off system power.
+ */
+static inline void
+ia64_sn_power_down(void)
+{
+	struct ia64_sal_retval ret_stuff;
+	SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
+	while(1)
+		cpu_relax();
+	/* never returns */
+}
+
+/**
+ * ia64_sn_fru_capture - tell the system controller to capture hw state
+ *
+ * This routine will call the SAL which will tell the system controller(s)
+ * to capture hw mmr information from each SHub in the system.
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+	if (isrv.status)
+		return 0;
+	return isrv.v0;
+}
+
+/*
+ * Performs an operation on a PCI bus or slot -- power up, power down
+ * or reset.
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
+			      u64 bus, char slot, 
+			      u64 action)
+{
+	struct ia64_sal_retval rv = {0, 0, 0, 0};
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
+		 bus, (u64) slot, 0, 0);
+	if (rv.status)
+		return rv.v0;
+	return 0;
+}
+
+
+/*
+ * Open a subchannel for sending arbitrary data to the system
+ * controller network via the system controller device associated with
+ * 'nasid'.  Return the subchannel number or a negative error code.
+ */
+static inline int
+ia64_sn_irtr_open(nasid_t nasid)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
+			   0, 0, 0, 0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Close system controller subchannel 'subch' previously opened on 'nasid'.
+ */
+static inline int
+ia64_sn_irtr_close(nasid_t nasid, int subch)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
+			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
+	return (int) rv.status;
+}
+
+/*
+ * Read data from system controller associated with 'nasid' on
+ * subchannel 'subch'.  The buffer to be filled is pointed to by
+ * 'buf', and its capacity is in the integer pointed to by 'len'.  The
+ * referent of 'len' is set to the number of bytes read by the SAL
+ * call.  The return value is either SALRET_OK (for bytes read) or
+ * SALRET_ERROR (for error or "no data available").
+ */
+static inline int
+ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
+			   (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+			   0, 0);
+	return (int) rv.status;
+}
+
+/*
+ * Write data to the system controller network via the system
+ * controller associated with 'nasid' on subchannel 'subch'.  The
+ * buffer to be written out is pointed to by 'buf', and 'len' is the
+ * number of bytes to be written.  The return value is either the
+ * number of bytes written (which could be zero) or a negative error
+ * code.
+ */
+static inline int
+ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
+			   (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+			   0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Check whether any interrupts are pending for the system controller
+ * associated with 'nasid' and its subchannel 'subch'.  The return
+ * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
+ * SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr(nasid_t nasid, int subch)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
+			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Enable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
+			   (u64) nasid, (u64) subch, intr, 0, 0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Disable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
+			   (u64) nasid, (u64) subch, intr, 0, 0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Set up a node as the point of contact for system controller
+ * environmental event delivery.
+ */
+static inline int
+ia64_sn_sysctl_event_init(nasid_t nasid)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid,
+			   0, 0, 0, 0, 0, 0);
+	return (int) rv.v0;
+}
+
+/*
+ * Ask the system controller on the specified nasid to reset
+ * the CX corelet clock.  Only valid on TIO nodes.
+ */
+static inline int
+ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST,
+			nasid, 0, 0, 0, 0, 0);
+	if (rv.status != 0)
+		return (int)rv.status;
+	if (rv.v0 != 0)
+		return (int)rv.v0;
+
+	return 0;
+}
+
+/*
+ * Get the associated ioboard type for a given nasid.
+ */
+static inline long
+ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
+{
+	struct ia64_sal_retval isrv;
+	SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD,
+			   nasid, 0, 0, 0, 0, 0);
+	if (isrv.v0 != 0) {
+		*ioboard = isrv.v0;
+		return isrv.status;
+	}
+	if (isrv.v1 != 0) {
+		*ioboard = isrv.v1;
+		return isrv.status;
+	}
+
+	return isrv.status;
+}
+
+/**
+ * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
+ * @nasid: NASID of node to read
+ * @index: FIT entry index to be retrieved (0..n)
+ * @fitentry: 16 byte buffer where FIT entry will be stored.
+ * @banbuf: optional buffer for retrieving banner
+ * @banlen: length of banner buffer
+ *
+ * Access to the physical PROM chips needs to be serialized since reads and
+ * writes can't occur at the same time, so we need to call into the SAL when
+ * we want to look at the FIT entries on the chips.
+ *
+ * Returns:
+ *	%SALRET_OK if ok
+ *	%SALRET_INVALID_ARG if index too big
+ *	%SALRET_NOT_IMPLEMENTED if running on older PROM
+ *	??? if nasid invalid OR banner buffer not large enough
+ */
+static inline int
+ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
+		      u64 banlen)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
+			banbuf, banlen, 0, 0);
+	return (int) rv.status;
+}
+
+/*
+ * Initialize the SAL components of the system controller
+ * communication driver; specifically pass in a sizable buffer that
+ * can be used for allocation of subchannel queues as new subchannels
+ * are opened.  "buf" points to the buffer, and "len" specifies its
+ * length.
+ */
+static inline int
+ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
+			   (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
+	return (int) rv.status;
+}
+
+/*
+ * Returns the nasid, subnode & slice corresponding to a SAPIC ID
+ *
+ *  In:
+ *	arg0 - SN_SAL_GET_SAPIC_INFO
+ *	arg1 - sapicid (lid >> 16) 
+ *  Out:
+ *	v0 - nasid
+ *	v1 - subnode
+ *	v2 - slice
+ */
+static inline u64
+ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+		if (nasid) *nasid = sapicid & 0xfff;
+		if (subnode) *subnode = (sapicid >> 13) & 1;
+		if (slice) *slice = (sapicid >> 12) & 3;
+		return 0;
+	}
+/***** END HACK *******/
+
+	if (ret_stuff.status < 0)
+		return ret_stuff.status;
+
+	if (nasid) *nasid = (int) ret_stuff.v0;
+	if (subnode) *subnode = (int) ret_stuff.v1;
+	if (slice) *slice = (int) ret_stuff.v2;
+	return 0;
+}
+ 
+/*
+ * Returns information about the HUB/SHUB.
+ *  In:
+ *	arg0 - SN_SAL_GET_SN_INFO
+ * 	arg1 - 0 (other values reserved for future use)
+ *  Out:
+ *	v0 
+ *		[7:0]   - shub type (0=shub1, 1=shub2)
+ *		[15:8]  - Log2 max number of nodes in entire system (includes
+ *			  C-bricks, I-bricks, etc)
+ *		[23:16] - Log2 of nodes per sharing domain			 
+ * 		[31:24] - partition ID
+ * 		[39:32] - coherency_id
+ * 		[47:40] - regionsize
+ *	v1 
+ *		[15:0]  - nasid mask (ex., 0x7ff for 11 bit nasid)
+ *		[23:16] - bit position of low nasid bit
+ */
+static inline u64
+ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, 
+		u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	ret_stuff.v1 = 0;
+	ret_stuff.v2 = 0;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+		int nasid = get_sapicid() & 0xfff;
+#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
+#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
+		if (shubtype) *shubtype = 0;
+		if (nasid_bitmask) *nasid_bitmask = 0x7ff;
+		if (nasid_shift) *nasid_shift = 38;
+		if (systemsize) *systemsize = 10;
+		if (sharing_domain_size) *sharing_domain_size = 8;
+		if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
+		if (coher) *coher = nasid >> 9;
+		if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
+			SH_SHUB_ID_NODES_PER_BIT_SHFT;
+		return 0;
+	}
+/***** END HACK *******/
+
+	if (ret_stuff.status < 0)
+		return ret_stuff.status;
+
+	if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
+	if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
+	if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
+	if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
+	if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
+	if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
+	if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
+	if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
+	return 0;
+}
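+
+/*
+ * Illustrative sketch (hypothetical caller): decode the shub type using
+ * the v0 bit layout documented above.
+ */
+#if 0	/* example only */
+static int
+system_is_shub2(void)
+{
+	u8 shubtype, nasid_shift, syssz, shdom, partid, coher, reg;
+	u16 nasid_mask;
+
+	if (ia64_sn_get_sn_info(0, &shubtype, &nasid_mask, &nasid_shift,
+				&syssz, &shdom, &partid, &coher, &reg))
+		return 0;
+	return shubtype == 1;	/* v0[7:0]: 0 = shub1, 1 = shub2 */
+}
+#endif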
+ 
+/*
+ * This is the access point to the Altix PROM hardware performance
+ * and status monitoring interface. For info on using this, see
+ * arch/ia64/include/asm/sn/sn2/sn_hwperf.h
+ */
+static inline int
+ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
+                  u64 a3, u64 a4, int *v0)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
+		opcode, a0, a1, a2, a3, a4);
+	if (v0)
+		*v0 = (int) rv.v0;
+	return (int) rv.status;
+}
+
+static inline int
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
+	return (int) rv.status;
+}
+
+/*
+ * BTE error recovery is implemented in SAL
+ */
+static inline int
+ia64_sn_bte_recovery(nasid_t nasid)
+{
+	struct ia64_sal_retval rv;
+
+	rv.status = 0;
+	SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
+	if (rv.status == SALRET_NOT_IMPLEMENTED)
+		return 0;
+	return (int) rv.status;
+}
+
+static inline int
+ia64_sn_is_fake_prom(void)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
+	return (rv.status == 0);
+}
+
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+	if (rv.status != 0)
+		return rv.status;
+	*feature_set = rv.v0;
+	return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
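+
+/*
+ * Illustrative sketch: per the comment in feature_sets.h, the kernel
+ * announces optional OS features once during boot; a feature cannot be
+ * disabled afterwards.  The function name is hypothetical.
+ */
+#if 0	/* example only */
+static void
+announce_os_features(void)
+{
+	ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
+	ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
+}
+#endif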
+
+static inline int
+sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
+{
+	struct ia64_sal_retval ret_stuff;
+
+	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
+				(u64)ecc, 0, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
+
+static inline int
+ia64_sn_kernel_launch_event(void)
+{
+	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
+
+union sn_watchlist_u {
+	u64     val;
+	struct {
+		u64	blade	: 16,
+			size	: 32,
+			filler	: 16;
+	};
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+				unsigned long *intr_mmr_offset)
+{
+	struct ia64_sal_retval rv;
+	unsigned long addr;
+	union sn_watchlist_u size_blade;
+	int watchlist;
+
+	addr = (unsigned long)mq;
+	size_blade.size = mq_size;
+	size_blade.blade = blade;
+
+	/*
+	 * BIOS returns the watchlist number or a negative error number.
+	 */
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+			size_blade.val, (u64)intr_mmr_offset,
+			(u64)&watchlist, 0, 0, 0);
+	if (rv.status < 0)
+		return rv.status;
+
+	return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+	struct ia64_sal_retval rv;
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+			watchlist_num, 0, 0, 0, 0, 0);
+	return rv.status;
+}
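+
+/*
+ * Illustrative sketch (hypothetical caller): the non-negative value
+ * returned by sn_mq_watchlist_alloc() is the handle later passed to
+ * sn_mq_watchlist_free().
+ */
+#if 0	/* example only */
+static int
+use_mq_watchlist(int blade, void *mq, unsigned int mq_size)
+{
+	unsigned long mmr_offset;
+	int wl;
+
+	wl = sn_mq_watchlist_alloc(blade, mq, mq_size, &mmr_offset);
+	if (wl < 0)
+		return wl;	/* negative BIOS error code */
+	/* ... monitor the memory queue via 'mmr_offset' ... */
+	sn_mq_watchlist_free(blade, wl);
+	return 0;
+}
+#endif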
+#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/include/asm/sn/tioca.h b/arch/ia64/include/asm/sn/tioca.h
new file mode 100644
index 0000000..666222d
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioca.h
@@ -0,0 +1,596 @@
+#ifndef _ASM_IA64_SN_TIO_TIOCA_H
+#define _ASM_IA64_SN_TIO_TIOCA_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#define TIOCA_PART_NUM	0xE020
+#define TIOCA_MFGR_NUM	0x24
+#define TIOCA_REV_A	0x1
+
+/*
+ * Register layout for TIO:CA.  See below for bitmasks for each register.
+ */
+
+struct tioca {
+	u64	ca_id;				/* 0x000000 */
+	u64	ca_control1;			/* 0x000008 */
+	u64	ca_control2;			/* 0x000010 */
+	u64	ca_status1;			/* 0x000018 */
+	u64	ca_status2;			/* 0x000020 */
+	u64	ca_gart_aperature;		/* 0x000028 */
+	u64	ca_gfx_detach;			/* 0x000030 */
+	u64	ca_inta_dest_addr;		/* 0x000038 */
+	u64	ca_intb_dest_addr;		/* 0x000040 */
+	u64	ca_err_int_dest_addr;		/* 0x000048 */
+	u64	ca_int_status;			/* 0x000050 */
+	u64	ca_int_status_alias;		/* 0x000058 */
+	u64	ca_mult_error;			/* 0x000060 */
+	u64	ca_mult_error_alias;		/* 0x000068 */
+	u64	ca_first_error;			/* 0x000070 */
+	u64	ca_int_mask;			/* 0x000078 */
+	u64	ca_crm_pkterr_type;		/* 0x000080 */
+	u64	ca_crm_pkterr_type_alias;	/* 0x000088 */
+	u64	ca_crm_ct_error_detail_1;	/* 0x000090 */
+	u64	ca_crm_ct_error_detail_2;	/* 0x000098 */
+	u64	ca_crm_tnumto;			/* 0x0000A0 */
+	u64	ca_gart_err;			/* 0x0000A8 */
+	u64	ca_pcierr_type;			/* 0x0000B0 */
+	u64	ca_pcierr_addr;			/* 0x0000B8 */
+
+	u64	ca_pad_0000C0[3];		/* 0x0000{C0..D0} */
+
+	u64	ca_pci_rd_buf_flush;		/* 0x0000D8 */
+	u64	ca_pci_dma_addr_extn;		/* 0x0000E0 */
+	u64	ca_agp_dma_addr_extn;		/* 0x0000E8 */
+	u64	ca_force_inta;			/* 0x0000F0 */
+	u64	ca_force_intb;			/* 0x0000F8 */
+	u64	ca_debug_vector_sel;		/* 0x000100 */
+	u64	ca_debug_mux_core_sel;		/* 0x000108 */
+	u64	ca_debug_mux_pci_sel;		/* 0x000110 */
+	u64	ca_debug_domain_sel;		/* 0x000118 */
+
+	u64	ca_pad_000120[28];		/* 0x0001{20..F8} */
+
+	u64	ca_gart_ptr_table;		/* 0x200 */
+	u64	ca_gart_tlb_addr[8];		/* 0x2{08..40} */
+};
+
+/*
+ * Mask/shift definitions for TIO:CA registers.  The convention here is
+ * to mainly use the names as they appear in the "TIO AEGIS Programmers'
+ * Reference" with a CA_ prefix added.  Some exceptions were made to fix
+ * duplicate field names or to generalize fields that are common to
+ * different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
+ * example).
+ *
+ * Fields consisting of a single bit have a single #define to mask the
+ * bit.  Fields consisting of multiple bits have two declarations: one to
+ * mask the proper bits in a register, and a second with the suffix
+ * "_SHFT" to identify how far the mask needs to be shifted right to get
+ * its base value.
+ */
+
+/* ==== ca_control1 */
+#define CA_SYS_BIG_END			(1ull << 0)
+#define CA_DMA_AGP_SWAP			(1ull << 1)
+#define CA_DMA_PCI_SWAP			(1ull << 2)
+#define CA_PIO_IO_SWAP			(1ull << 3)
+#define CA_PIO_MEM_SWAP			(1ull << 4)
+#define CA_GFX_WR_SWAP			(1ull << 5)
+#define CA_AGP_FW_ENABLE		(1ull << 6)
+#define CA_AGP_CAL_CYCLE		(0x7ull << 7)
+#define CA_AGP_CAL_CYCLE_SHFT		7
+#define CA_AGP_CAL_PRSCL_BYP		(1ull << 10)
+#define CA_AGP_INIT_CAL_ENB		(1ull << 11)
+#define CA_INJ_ADDR_PERR		(1ull << 12)
+#define CA_INJ_DATA_PERR		(1ull << 13)
+	/* bits 15:14 unused */
+#define CA_PCIM_IO_NBE_AD		(0x7ull << 16)
+#define CA_PCIM_IO_NBE_AD_SHFT		16
+#define CA_PCIM_FAST_BTB_ENB		(1ull << 19)
+	/* bits 23:20 unused */
+#define CA_PIO_ADDR_OFFSET		(0xffull << 24)
+#define CA_PIO_ADDR_OFFSET_SHFT		24
+	/* bits 35:32 unused */
+#define CA_AGPDMA_OP_COMBDELAY		(0x1full << 36)
+#define CA_AGPDMA_OP_COMBDELAY_SHFT	36
+	/* bit 41 unused */
+#define CA_AGPDMA_OP_ENB_COMBDELAY	(1ull << 42)
+#define	CA_PCI_INT_LPCNT		(0xffull << 44)
+#define CA_PCI_INT_LPCNT_SHFT		44
+	/* bits 63:52 unused */
+
+/* ==== ca_control2 */
+#define CA_AGP_LATENCY_TO		(0xffull << 0)
+#define CA_AGP_LATENCY_TO_SHFT		0
+#define CA_PCI_LATENCY_TO		(0xffull << 8)
+#define CA_PCI_LATENCY_TO_SHFT		8
+#define CA_PCI_MAX_RETRY		(0x3ffull << 16)
+#define CA_PCI_MAX_RETRY_SHFT		16
+	/* bits 27:26 unused */
+#define CA_RT_INT_EN			(0x3ull << 28)
+#define CA_RT_INT_EN_SHFT			28
+#define CA_MSI_INT_ENB			(1ull << 30)
+#define CA_PCI_ARB_ERR_ENB		(1ull << 31)
+#define CA_GART_MEM_PARAM		(0x3ull << 32)
+#define CA_GART_MEM_PARAM_SHFT		32
+#define CA_GART_RD_PREFETCH_ENB		(1ull << 34)
+#define CA_GART_WR_PREFETCH_ENB		(1ull << 35)
+#define CA_GART_FLUSH_TLB		(1ull << 36)
+	/* bits 39:37 unused */
+#define CA_CRM_TNUMTO_PERIOD		(0x1fffull << 40)
+#define CA_CRM_TNUMTO_PERIOD_SHFT	40
+	/* bits 55:53 unused */
+#define CA_CRM_TNUMTO_ENB		(1ull << 56)
+#define CA_CRM_PRESCALER_BYP		(1ull << 57)
+	/* bits 59:58 unused */
+#define CA_CRM_MAX_CREDIT		(0x7ull << 60)
+#define CA_CRM_MAX_CREDIT_SHFT		60
+	/* bit 63 unused */
+
+/* ==== ca_status1 */
+#define CA_CORELET_ID			(0x3ull << 0)
+#define CA_CORELET_ID_SHFT		0
+#define CA_INTA_N			(1ull << 2)
+#define CA_INTB_N			(1ull << 3)
+#define CA_CRM_CREDIT_AVAIL		(0x7ull << 4)
+#define CA_CRM_CREDIT_AVAIL_SHFT	4
+	/* bit 7 unused */
+#define CA_CRM_SPACE_AVAIL		(0x7full << 8)
+#define CA_CRM_SPACE_AVAIL_SHFT		8
+	/* bit 15 unused */
+#define CA_GART_TLB_VAL			(0xffull << 16)
+#define CA_GART_TLB_VAL_SHFT		16
+	/* bits 63:24 unused */
+
+/* ==== ca_status2 */
+#define CA_GFX_CREDIT_AVAIL		(0xffull << 0)
+#define CA_GFX_CREDIT_AVAIL_SHFT	0
+#define CA_GFX_OPQ_AVAIL		(0xffull << 8)
+#define CA_GFX_OPQ_AVAIL_SHFT		8
+#define CA_GFX_WRBUFF_AVAIL		(0xffull << 16)
+#define CA_GFX_WRBUFF_AVAIL_SHFT	16
+#define CA_ADMA_OPQ_AVAIL		(0xffull << 24)
+#define CA_ADMA_OPQ_AVAIL_SHFT		24
+#define CA_ADMA_WRBUFF_AVAIL		(0xffull << 32)
+#define CA_ADMA_WRBUFF_AVAIL_SHFT	32
+#define CA_ADMA_RDBUFF_AVAIL		(0x7full << 40)
+#define CA_ADMA_RDBUFF_AVAIL_SHFT	40
+#define CA_PCI_PIO_OP_STAT		(1ull << 47)
+#define CA_PDMA_OPQ_AVAIL		(0xfull << 48)
+#define CA_PDMA_OPQ_AVAIL_SHFT		48
+#define CA_PDMA_WRBUFF_AVAIL		(0xfull << 52)
+#define CA_PDMA_WRBUFF_AVAIL_SHFT	52
+#define CA_PDMA_RDBUFF_AVAIL		(0x3ull << 56)
+#define CA_PDMA_RDBUFF_AVAIL_SHFT	56
+	/* bits 63:58 unused */
+
+/* ==== ca_gart_aperature */
+#define CA_GART_AP_ENB_AGP		(1ull << 0)
+#define CA_GART_PAGE_SIZE		(1ull << 1)
+#define CA_GART_AP_ENB_PCI		(1ull << 2)
+	/* bits 11:3 unused */
+#define CA_GART_AP_SIZE			(0x3ffull << 12)
+#define CA_GART_AP_SIZE_SHFT		12
+#define CA_GART_AP_BASE			(0x3ffffffffffull << 22)
+#define CA_GART_AP_BASE_SHFT		22
+
+/* ==== ca_inta_dest_addr
+   ==== ca_intb_dest_addr 
+   ==== ca_err_int_dest_addr */
+	/* bits 2:0 unused */
+#define CA_INT_DEST_ADDR		(0x7ffffffffffffull << 3)
+#define CA_INT_DEST_ADDR_SHFT		3
+	/* bits 55:54 unused */
+#define CA_INT_DEST_VECT		(0xffull << 56)
+#define CA_INT_DEST_VECT_SHFT		56
+
+/* ==== ca_int_status */
+/* ==== ca_int_status_alias */
+/* ==== ca_mult_error */
+/* ==== ca_mult_error_alias */
+/* ==== ca_first_error */
+/* ==== ca_int_mask */
+#define CA_PCI_ERR			(1ull << 0)
+	/* bits 3:1 unused */
+#define CA_GART_FETCH_ERR		(1ull << 4)
+#define CA_GFX_WR_OVFLW			(1ull << 5)
+#define CA_PIO_REQ_OVFLW		(1ull << 6)
+#define CA_CRM_PKTERR			(1ull << 7)
+#define CA_CRM_DVERR			(1ull << 8)
+#define CA_TNUMTO			(1ull << 9)
+#define CA_CXM_RSP_CRED_OVFLW		(1ull << 10)
+#define CA_CXM_REQ_CRED_OVFLW		(1ull << 11)
+#define CA_PIO_INVALID_ADDR		(1ull << 12)
+#define CA_PCI_ARB_TO			(1ull << 13)
+#define CA_AGP_REQ_OFLOW		(1ull << 14)
+#define CA_SBA_TYPE1_ERR		(1ull << 15)
+	/* bit 16 unused */
+#define CA_INTA				(1ull << 17)
+#define CA_INTB				(1ull << 18)
+#define CA_MULT_INTA			(1ull << 19)
+#define CA_MULT_INTB			(1ull << 20)
+#define CA_GFX_CREDIT_OVFLW		(1ull << 21)
+	/* bits 63:22 unused */
+
+/* ==== ca_crm_pkterr_type */
+/* ==== ca_crm_pkterr_type_alias */
+#define CA_CRM_PKTERR_SBERR_HDR		(1ull << 0)
+#define CA_CRM_PKTERR_DIDN		(1ull << 1)
+#define CA_CRM_PKTERR_PACTYPE		(1ull << 2)
+#define CA_CRM_PKTERR_INV_TNUM		(1ull << 3)
+#define CA_CRM_PKTERR_ADDR_RNG		(1ull << 4)
+#define CA_CRM_PKTERR_ADDR_ALGN		(1ull << 5)
+#define CA_CRM_PKTERR_HDR_PARAM		(1ull << 6)
+#define CA_CRM_PKTERR_CW_ERR		(1ull << 7)
+#define CA_CRM_PKTERR_SBERR_NH		(1ull << 8)
+#define CA_CRM_PKTERR_EARLY_TERM	(1ull << 9)
+#define CA_CRM_PKTERR_EARLY_TAIL	(1ull << 10)
+#define CA_CRM_PKTERR_MSSNG_TAIL	(1ull << 11)
+#define CA_CRM_PKTERR_MSSNG_HDR		(1ull << 12)
+	/* bits 15:13 unused */
+#define CA_FIRST_CRM_PKTERR_SBERR_HDR	(1ull << 16)
+#define CA_FIRST_CRM_PKTERR_DIDN	(1ull << 17)
+#define CA_FIRST_CRM_PKTERR_PACTYPE	(1ull << 18)
+#define CA_FIRST_CRM_PKTERR_INV_TNUM	(1ull << 19)
+#define CA_FIRST_CRM_PKTERR_ADDR_RNG	(1ull << 20)
+#define CA_FIRST_CRM_PKTERR_ADDR_ALGN	(1ull << 21)
+#define CA_FIRST_CRM_PKTERR_HDR_PARAM	(1ull << 22)
+#define CA_FIRST_CRM_PKTERR_CW_ERR	(1ull << 23)
+#define CA_FIRST_CRM_PKTERR_SBERR_NH	(1ull << 24)
+#define CA_FIRST_CRM_PKTERR_EARLY_TERM	(1ull << 25)
+#define CA_FIRST_CRM_PKTERR_EARLY_TAIL	(1ull << 26)
+#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL	(1ull << 27)
+#define CA_FIRST_CRM_PKTERR_MSSNG_HDR	(1ull << 28)
+	/* bits 63:29 unused */
+
+/* ==== ca_crm_ct_error_detail_1 */
+#define CA_PKT_TYPE			(0xfull << 0)
+#define CA_PKT_TYPE_SHFT		0
+#define CA_SRC_ID			(0x3ull << 4)
+#define CA_SRC_ID_SHFT			4
+#define CA_DATA_SZ			(0x3ull << 6)
+#define CA_DATA_SZ_SHFT			6
+#define CA_TNUM				(0xffull << 8)
+#define CA_TNUM_SHFT			8
+#define CA_DW_DATA_EN			(0xffull << 16)
+#define CA_DW_DATA_EN_SHFT		16
+#define CA_GFX_CRED			(0xffull << 24)
+#define CA_GFX_CRED_SHFT		24
+#define CA_MEM_RD_PARAM			(0x3ull << 32)
+#define CA_MEM_RD_PARAM_SHFT		32
+#define CA_PIO_OP			(1ull << 34)
+#define CA_CW_ERR			(1ull << 35)
+	/* bits 62:36 unused */
+#define CA_VALID			(1ull << 63)
+
+/* ==== ca_crm_ct_error_detail_2 */
+	/* bits 2:0 unused */
+#define CA_PKT_ADDR			(0x1fffffffffffffull << 3)
+#define CA_PKT_ADDR_SHFT		3
+	/* bits 63:56 unused */
+
+/* ==== ca_crm_tnumto */
+#define CA_CRM_TNUMTO_VAL		(0xffull << 0)
+#define CA_CRM_TNUMTO_VAL_SHFT		0
+#define CA_CRM_TNUMTO_WR		(1ull << 8)
+	/* bits 63:9 unused */
+
+/* ==== ca_gart_err */
+#define CA_GART_ERR_SOURCE		(0x3ull << 0)
+#define CA_GART_ERR_SOURCE_SHFT		0
+	/* bits 3:2 unused */
+#define CA_GART_ERR_ADDR		(0xfffffffffull << 4)
+#define CA_GART_ERR_ADDR_SHFT		4
+	/* bits 63:40 unused */
+
+/* ==== ca_pcierr_type */
+#define CA_PCIERR_DATA			(0xffffffffull << 0)
+#define CA_PCIERR_DATA_SHFT		0
+#define CA_PCIERR_ENB			(0xfull << 32)
+#define CA_PCIERR_ENB_SHFT		32
+#define CA_PCIERR_CMD			(0xfull << 36)
+#define CA_PCIERR_CMD_SHFT		36
+#define CA_PCIERR_A64			(1ull << 40)
+#define CA_PCIERR_SLV_SERR		(1ull << 41)
+#define CA_PCIERR_SLV_WR_PERR		(1ull << 42)
+#define CA_PCIERR_SLV_RD_PERR		(1ull << 43)
+#define CA_PCIERR_MST_SERR		(1ull << 44)
+#define CA_PCIERR_MST_WR_PERR		(1ull << 45)
+#define CA_PCIERR_MST_RD_PERR		(1ull << 46)
+#define CA_PCIERR_MST_MABT		(1ull << 47)
+#define CA_PCIERR_MST_TABT		(1ull << 48)
+#define CA_PCIERR_MST_RETRY_TOUT	(1ull << 49)
+
+#define CA_PCIERR_TYPES \
+	(CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \
+	 CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \
+	 CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \
+	 CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT)
+
+	/* bits 63:50 unused */
+
+/* ==== ca_pci_dma_addr_extn */
+#define CA_UPPER_NODE_OFFSET		(0x3full << 0)
+#define CA_UPPER_NODE_OFFSET_SHFT	0
+	/* bits 7:6 unused */
+#define CA_CHIPLET_ID			(0x3ull << 8)
+#define CA_CHIPLET_ID_SHFT		8
+	/* bits 11:10 unused */
+#define CA_PCI_DMA_NODE_ID		(0xffffull << 12)
+#define CA_PCI_DMA_NODE_ID_SHFT		12
+	/* bits 27:26 unused */
+#define CA_PCI_DMA_PIO_MEM_TYPE		(1ull << 28)
+	/* bits 63:29 unused */
+
+
+/* ==== ca_agp_dma_addr_extn */
+	/* bits 19:0 unused */
+#define CA_AGP_DMA_NODE_ID		(0xffffull << 20)
+#define CA_AGP_DMA_NODE_ID_SHFT		20
+	/* bits 27:26 unused */
+#define CA_AGP_DMA_PIO_MEM_TYPE		(1ull << 28)
+	/* bits 63:29 unused */
+
+/* ==== ca_debug_vector_sel */
+#define CA_DEBUG_MN_VSEL		(0xfull << 0)
+#define CA_DEBUG_MN_VSEL_SHFT		0
+#define CA_DEBUG_PP_VSEL		(0xfull << 4)
+#define CA_DEBUG_PP_VSEL_SHFT		4
+#define CA_DEBUG_GW_VSEL		(0xfull << 8)
+#define CA_DEBUG_GW_VSEL_SHFT		8
+#define CA_DEBUG_GT_VSEL		(0xfull << 12)
+#define CA_DEBUG_GT_VSEL_SHFT		12
+#define CA_DEBUG_PD_VSEL		(0xfull << 16)
+#define CA_DEBUG_PD_VSEL_SHFT		16
+#define CA_DEBUG_AD_VSEL		(0xfull << 20)
+#define CA_DEBUG_AD_VSEL_SHFT		20
+#define CA_DEBUG_CX_VSEL		(0xfull << 24)
+#define CA_DEBUG_CX_VSEL_SHFT		24
+#define CA_DEBUG_CR_VSEL		(0xfull << 28)
+#define CA_DEBUG_CR_VSEL_SHFT		28
+#define CA_DEBUG_BA_VSEL		(0xfull << 32)
+#define CA_DEBUG_BA_VSEL_SHFT		32
+#define CA_DEBUG_PE_VSEL		(0xfull << 36)
+#define CA_DEBUG_PE_VSEL_SHFT		36
+#define CA_DEBUG_BO_VSEL		(0xfull << 40)
+#define CA_DEBUG_BO_VSEL_SHFT		40
+#define CA_DEBUG_BI_VSEL		(0xfull << 44)
+#define CA_DEBUG_BI_VSEL_SHFT		44
+#define CA_DEBUG_AS_VSEL		(0xfull << 48)
+#define CA_DEBUG_AS_VSEL_SHFT		48
+#define CA_DEBUG_PS_VSEL		(0xfull << 52)
+#define CA_DEBUG_PS_VSEL_SHFT		52
+#define CA_DEBUG_PM_VSEL		(0xfull << 56)
+#define CA_DEBUG_PM_VSEL_SHFT		56
+	/* bits 63:60 unused */
+
+/* ==== ca_debug_mux_core_sel */
+/* ==== ca_debug_mux_pci_sel */
+#define CA_DEBUG_MSEL0			(0x7ull << 0)
+#define CA_DEBUG_MSEL0_SHFT		0
+	/* bit 3 unused */
+#define CA_DEBUG_NSEL0			(0x7ull << 4)
+#define CA_DEBUG_NSEL0_SHFT		4
+	/* bit 7 unused */
+#define CA_DEBUG_MSEL1			(0x7ull << 8)
+#define CA_DEBUG_MSEL1_SHFT		8
+	/* bit 11 unused */
+#define CA_DEBUG_NSEL1			(0x7ull << 12)
+#define CA_DEBUG_NSEL1_SHFT		12
+	/* bit 15 unused */
+#define CA_DEBUG_MSEL2			(0x7ull << 16)
+#define CA_DEBUG_MSEL2_SHFT		16
+	/* bit 19 unused */
+#define CA_DEBUG_NSEL2			(0x7ull << 20)
+#define CA_DEBUG_NSEL2_SHFT		20
+	/* bit 23 unused */
+#define CA_DEBUG_MSEL3			(0x7ull << 24)
+#define CA_DEBUG_MSEL3_SHFT		24
+	/* bit 27 unused */
+#define CA_DEBUG_NSEL3			(0x7ull << 28)
+#define CA_DEBUG_NSEL3_SHFT		28
+	/* bit 31 unused */
+#define CA_DEBUG_MSEL4			(0x7ull << 32)
+#define CA_DEBUG_MSEL4_SHFT		32
+	/* bit 35 unused */
+#define CA_DEBUG_NSEL4			(0x7ull << 36)
+#define CA_DEBUG_NSEL4_SHFT		36
+	/* bit 39 unused */
+#define CA_DEBUG_MSEL5			(0x7ull << 40)
+#define CA_DEBUG_MSEL5_SHFT		40
+	/* bit 43 unused */
+#define CA_DEBUG_NSEL5			(0x7ull << 44)
+#define CA_DEBUG_NSEL5_SHFT		44
+	/* bit 47 unused */
+#define CA_DEBUG_MSEL6			(0x7ull << 48)
+#define CA_DEBUG_MSEL6_SHFT		48
+	/* bit 51 unused */
+#define CA_DEBUG_NSEL6			(0x7ull << 52)
+#define CA_DEBUG_NSEL6_SHFT		52
+	/* bit 55 unused */
+#define CA_DEBUG_MSEL7			(0x7ull << 56)
+#define CA_DEBUG_MSEL7_SHFT		56
+	/* bit 59 unused */
+#define CA_DEBUG_NSEL7			(0x7ull << 60)
+#define CA_DEBUG_NSEL7_SHFT		60
+	/* bit 63 unused */
+
+
+/* ==== ca_debug_domain_sel */
+#define CA_DEBUG_DOMAIN_L		(1ull << 0)
+#define CA_DEBUG_DOMAIN_H		(1ull << 1)
+	/* bits 63:2 unused */
+
+/* ==== ca_gart_ptr_table */
+#define CA_GART_PTR_VAL			(1ull << 0)
+	/* bits 11:1 unused */
+#define CA_GART_PTR_ADDR		(0xfffffffffffull << 12)
+#define CA_GART_PTR_ADDR_SHFT		12
+	/* bits 63:56 unused */
+
+/* ==== ca_gart_tlb_addr[0-7] */
+#define CA_GART_TLB_ADDR		(0xffffffffffffffull << 0)
+#define CA_GART_TLB_ADDR_SHFT		0
+	/* bits 62:56 unused */
+#define CA_GART_TLB_ENTRY_VAL		(1ull << 63)
+
+/*
+ * PIO address space ranges for TIO:CA
+ */
+
+/* CA internal registers */
+#define CA_PIO_ADMIN			0x00000000
+#define CA_PIO_ADMIN_LEN		0x00010000
+
+/* GFX Write Buffer - Diagnostics */
+#define CA_PIO_GFX			0x00010000
+#define CA_PIO_GFX_LEN			0x00010000
+
+/* AGP DMA Write Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAWRITE		0x00020000
+#define CA_PIO_AGP_DMAWRITE_LEN		0x00010000
+
+/* AGP DMA READ Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAREAD		0x00030000
+#define CA_PIO_AGP_DMAREAD_LEN		0x00010000
+
+/* PCI Config Type 0 */
+#define CA_PIO_PCI_TYPE0_CONFIG		0x01000000
+#define CA_PIO_PCI_TYPE0_CONFIG_LEN	0x01000000
+
+/* PCI Config Type 1 */
+#define CA_PIO_PCI_TYPE1_CONFIG		0x02000000
+#define CA_PIO_PCI_TYPE1_CONFIG_LEN	0x01000000
+
+/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
+#define CA_PIO_PCI_IO			0x03000000
+#define CA_PIO_PCI_IO_LEN		0x05000000
+
+/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
+/*	use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM_OFFSET		0x08000000
+#define CA_PIO_PCI_MEM_OFFSET_LEN	0x08000000
+
+/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
+/*	use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM			0x40000000
+#define CA_PIO_PCI_MEM_LEN		0xc0000000
+
+/*
+ * DMA space
+ *
+ * The CA aperature (i.e. bus address range) mapped by the GART is segmented
+ * into two parts.  The lower portion of the aperature is used for mapping
+ * 32 bit PCI addresses, which are managed by the dma interfaces in this
+ * file.  The upper portion of the aperature is used for mapping 48 bit AGP
+ * addresses.  The AGP portion of the aperature is managed by the
+ * agpgart_be.c driver in drivers/linux/agp.  There are ca-specific hooks
+ * in that driver to manipulate the gart, but management of the AGP portion
+ * of the aperature is the responsibility of that driver.
+ *
+ * CA allows three main types of DMA mapping:
+ *
+ * PCI 64-bit	Managed by this driver
+ * PCI 32-bit 	Managed by this driver
+ * AGP 48-bit	Managed by hooks in the /dev/agpgart driver
+ *
+ * All of the above can optionally be remapped through the GART.  The following
+ * table lists the combinations of addressing types and GART remapping that
+ * are currently supported by the driver (h/w supports all, s/w limits this):
+ *
+ *		PCI64		PCI32		AGP48
+ * GART		no		yes		yes
+ * Direct	yes		yes		no
+ *
+ * GART remapping of PCI64 is not done because there is no need to.  The
+ * 64 bit PCI address holds all of the information necessary to target any
+ * memory in the system.
+ *
+ * AGP48 is always mapped through the GART.  Management of the AGP48 portion
+ * of the aperature is the responsibility of code in the agpgart_be driver.
+ *
+ * The non-64 bit bus address space will currently be partitioned like this:
+ *
+ *	0xffff_ffff_ffff	+--------
+ *				| AGP48 direct
+ *				| Space managed by this driver
+ *	CA_AGP_DIRECT_BASE	+--------
+ *				| AGP GART mapped (gfx aperature)
+ *				| Space managed by /dev/agpgart driver
+ *				| This range is exposed to the agpgart
+ * 				| driver as the "graphics aperature"
+ *	CA_AGP_MAPPED_BASE	+-----
+ *				| PCI GART mapped
+ *				| Space managed by this driver		
+ *	CA_PCI32_MAPPED_BASE	+----
+ *				| PCI32 direct
+ *				| Space managed by this driver
+ *	0xC000_0000		+--------
+ *	(CA_PCI32_DIRECT_BASE)
+ *
+ * The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE
+ * is what we call the CA aperature.  Addresses falling in this range will
+ * be remapped using the GART.
+ *
+ * The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE
+ * is what we call the graphics aperature.  This is a subset of the CA
+ * aperature and is under the control of the agpgart_be driver.
+ *
+ * CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are
+ * somewhat arbitrary values.  The known constraints on choosing these is:
+ *
+ * 1)  CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperature size)
+ *     must be one of the values supported by the ca_gart_aperature register.
+ *     Currently valid values are: 4MB through 4096MB in powers of 2 increments
+ *
+ * 2)  CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperature size)
+ *     must be in MB units since that's what the agpgart driver assumes.
+ */
+
+/*
+ * Define Bus DMA ranges.  These are configurable (see constraints above)
+ * and will probably need tuning based on experience.
+ */
+
+
+/*
+ * 11/24/03
+ * CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it
+ * generally unusable.  The problem is that for PCI direct 32 
+ * DMA's, all 32 bits of the bus address are used to form the lower 32 bits
+ * of the coretalk address, and coretalk bits 38:32 come from a register.
+ * Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available
+ * for DMA (the rest is allocated to PIO), host node addresses need to be
+ * such that their lower 32 bits fall in the 0xC0000000-0xffffffff range
+ * as well.  So there can be no PCI32 direct DMA below 3GB!!  For this
+ * reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes
+ * tioca_dma_direct32() a noop but preserves the code flow should this issue
+ * be fixed in a respin.
+ *
+ * For now, all PCI32 DMA's must be mapped through the GART.
+ */
+
+#define CA_PCI32_DIRECT_BASE	0xC0000000UL	/* BASE not configurable */
+#define CA_PCI32_DIRECT_SIZE	0x00000000UL	/* 0 MB */
+
+#define CA_PCI32_MAPPED_BASE	0xC0000000UL
+#define CA_PCI32_MAPPED_SIZE	0x40000000UL	/* 2GB */
+
+#define CA_AGP_MAPPED_BASE	0x80000000UL
+#define CA_AGP_MAPPED_SIZE	0x40000000UL	/* 2GB */
+
+#define CA_AGP_DIRECT_BASE	0x40000000UL	/* 2GB */
+#define CA_AGP_DIRECT_SIZE	0x40000000UL
+
+#define CA_APERATURE_BASE	(CA_AGP_MAPPED_BASE)
+#define CA_APERATURE_SIZE	(CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE)
+
+#endif  /* _ASM_IA64_SN_TIO_TIOCA_H */
diff --git a/arch/ia64/include/asm/sn/tioca_provider.h b/arch/ia64/include/asm/sn/tioca_provider.h
new file mode 100644
index 0000000..9a820ac
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioca_provider.h
@@ -0,0 +1,207 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
+#define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
+
+#include <asm/sn/tioca.h>
+
+/*
+ * WAR enables
+ * Defines for individual WARs. Each is a bitmask of applicable
+ * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
+ * (3 << 1) == (rev A or rev B), etc
+ */
+
+#define TIOCA_WAR_ENABLED(pv, tioca_common) \
+	((1 << tioca_common->ca_rev) & pv)
+
+  /* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */
+#define PV907908 (1 << 1)
+  /* ATI config space problems after BIOS execution starts */
+#define PV908234 (1 << 1)
+  /* CA:AGPDMA write request data mismatch with ABC1CL merge */
+#define PV895469 (1 << 1)
+  /* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA*/
+#define PV910244 (1 << 1)
+
+struct tioca_dmamap {
+	struct list_head	cad_list;	/* headed by ca_list */
+
+	dma_addr_t		cad_dma_addr;	/* Linux dma handle */
+	uint			cad_gart_entry; /* start entry in ca_gart_pagemap */
+	uint			cad_gart_size;	/* #entries for this map */
+};
+
+/*
+ * Kernel only fields.  Prom may look at this stuff for debugging only.
+ * Access this structure through the ca_kernel_private ptr.
+ */
+
+struct tioca_common;
+
+struct tioca_kernel {
+	struct tioca_common	*ca_common;	/* tioca this belongs to */
+	struct list_head	ca_list;	/* list of all ca's */
+	struct list_head	ca_dmamaps;
+	spinlock_t		ca_lock;	/* Kernel lock */
+	cnodeid_t		ca_closest_node;
+	struct list_head	*ca_devices;	/* bus->devices */
+
+	/*
+	 * General GART stuff
+	 */
+	u64	ca_ap_size;		/* size of aperature in bytes */
+	u32	ca_gart_entries;	/* # u64 entries in gart */
+	u32	ca_ap_pagesize; 	/* aperature page size in bytes */
+	u64	ca_ap_bus_base; 	/* bus address of CA aperature */
+	u64	ca_gart_size;		/* gart size in bytes */
+	u64	*ca_gart;		/* gart table vaddr */
+	u64	ca_gart_coretalk_addr;	/* gart coretalk addr */
+	u8		ca_gart_iscoherent;	/* used in tioca_tlbflush */
+
+	/* PCI GART convenience values */
+	u64	ca_pciap_base;		/* pci aperature bus base address */
+	u64	ca_pciap_size;		/* pci aperature size (bytes) */
+	u64	ca_pcigart_base;	/* gfx GART bus base address */
+	u64	*ca_pcigart;		/* gfx GART vm address */
+	u32	ca_pcigart_entries;
+	u32	ca_pcigart_start;	/* PCI start index in ca_gart */
+	void		*ca_pcigart_pagemap;
+
+	/* AGP GART convenience values */
+	u64	ca_gfxap_base;		/* gfx aperture bus base address */
+	u64	ca_gfxap_size;		/* gfx aperture size (bytes) */
+	u64	ca_gfxgart_base;	/* gfx GART bus base address */
+	u64	*ca_gfxgart;		/* gfx GART vm address */
+	u32	ca_gfxgart_entries;
+	u32	ca_gfxgart_start;	/* agpgart start index in ca_gart */
+};
+
+/*
+ * Common tioca info shared between kernel and prom
+ *
+ * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES
+ * TO THE PROM VERSION.
+ */
+
+struct tioca_common {
+	struct pcibus_bussoft	ca_common;	/* common pciio header */
+
+	u32		ca_rev;
+	u32		ca_closest_nasid;
+
+	u64		ca_prom_private;
+	u64		ca_kernel_private;
+};
+
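+/*
+ * Editor's illustrative sketch, not part of the original header: the
+ * TIOCA_WAR_ENABLED()/PVxxxxxx bitmasks above gate revision-specific
+ * workaround code.  The helper name below is hypothetical.
+ */
+static inline int
+tioca_example_needs_tlb_war(struct tioca_common *tioca_common)
+{
+	/* PV910244 is encoded as (1 << 1), i.e. it applies to rev A parts */
+	return TIOCA_WAR_ENABLED(PV910244, tioca_common) != 0;
+}
+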
+/**
+ * tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry
+ * @paddr: page address to convert
+ *
+ * Convert a system [coretalk] address to a GART entry.  GART entries are
+ * formed using the following:
+ *
+ *	data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) |
+ *		 (REMAP_SYS_ADDR) ) >> 12 )
+ *
+ * The data written to one GART table entry in system memory is the remapped
+ * system address for one page: the coretalk-format address right shifted
+ * 12 bits, with a valid bit set.
+ *
+ *	GART_TABLE_ENTRY [ 25:0 ]  -- REMAP_SYS_ADDRESS[37:12]
+ *	GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id
+ *	GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID
+ *	GART_TABLE_ENTRY [ 63 ]    -- Valid Bit
+ */
+static inline u64
+tioca_paddr_to_gart(unsigned long paddr)
+{
+	/*
+	 * We are assuming right now that paddr already has the correct
+	 * format since the address from xtalk_dmaXXX should already have
+	 * NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations.
+	 */
+
+	return ((paddr) >> 12) | (1UL << 63);
+}
+
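+/*
+ * Worked example (editor's note, address value hypothetical): for
+ * paddr == 0x123456789000 the returned entry is
+ * (0x123456789000 >> 12) | (1UL << 63) == 0x8000000123456789.
+ */
+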
+/**
+ * tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA
+ * @page_addr: system page address to map
+ */
+
+static inline unsigned long
+tioca_physpage_to_gart(u64 page_addr)
+{
+	u64 coretalk_addr;
+
+	coretalk_addr = PHYS_TO_TIODMA(page_addr);
+	if (!coretalk_addr)
+		return 0;
+
+	return tioca_paddr_to_gart(coretalk_addr);
+}
+
+/**
+ * tioca_tlbflush - invalidate cached SGI CA GART TLB entries
+ * @tioca_kernel: CA context 
+ *
+ * Invalidate tlb entries for a given CA GART.  Main complexity is to account
+ * for revA bug.
+ */
+static inline void
+tioca_tlbflush(struct tioca_kernel *tioca_kernel)
+{
+	volatile u64 tmp;
+	volatile struct tioca __iomem *ca_base;
+	struct tioca_common *tioca_common;
+
+	tioca_common = tioca_kernel->ca_common;
+	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
+
+	/*
+	 * Explicit flushes not needed if GART is in cached mode
+	 */
+	if (tioca_kernel->ca_gart_iscoherent) {
+		if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) {
+			/*
+			 * PV910244:  RevA CA needs explicit flushes.
+			 * Need to put GART into uncached mode before
+			 * flushing otherwise the explicit flush is ignored.
+			 *
+			 * Alternate WAR would be to leave GART cached and
+			 * touch every CL aligned GART entry.
+			 */
+
+			__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+			__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+			__sn_setq_relaxed(&ca_base->ca_control2,
+			    (0x2ull << CA_GART_MEM_PARAM_SHFT));
+			tmp = __sn_readq_relaxed(&ca_base->ca_control2);
+		}
+
+		return;
+	}
+
+	/*
+	 * Gart in uncached mode ... need an explicit flush.
+	 */
+
+	__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+	tmp = __sn_readq_relaxed(&ca_base->ca_control2);
+}
+
+extern u32	tioca_gart_found;
+extern struct list_head tioca_list;
+extern int tioca_init_provider(void);
+extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
+#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/tioce.h b/arch/ia64/include/asm/sn/tioce.h
new file mode 100644
index 0000000..6eae8ad
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioce.h
@@ -0,0 +1,760 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_SN_TIOCE_H__
+#define __ASM_IA64_SN_TIOCE_H__
+
+/* CE ASIC part & mfgr information  */
+#define TIOCE_PART_NUM			0xCE00
+#define TIOCE_SRC_ID			0x01
+#define TIOCE_REV_A			0x1
+
+/* CE Virtual PPB Vendor/Device IDs */
+#define CE_VIRT_PPB_VENDOR_ID		0x10a9
+#define CE_VIRT_PPB_DEVICE_ID		0x4002
+
+/* CE Host Bridge Vendor/Device IDs */
+#define CE_HOST_BRIDGE_VENDOR_ID	0x10a9
+#define CE_HOST_BRIDGE_DEVICE_ID	0x4001
+
+
+#define TIOCE_NUM_M40_ATES		4096
+#define TIOCE_NUM_M3240_ATES		2048
+#define TIOCE_NUM_PORTS			2
+
+/*
+ * Register layout for TIOCE.  MMR offsets are shown at the far right of the
+ * structure definition.
+ */
+typedef volatile struct tioce {
+	/*
+	 * ADMIN : Administration Registers
+	 */
+	u64	ce_adm_id;				/* 0x000000 */
+	u64	ce_pad_000008;				/* 0x000008 */
+	u64	ce_adm_dyn_credit_status;		/* 0x000010 */
+	u64	ce_adm_last_credit_status;		/* 0x000018 */
+	u64	ce_adm_credit_limit;			/* 0x000020 */
+	u64	ce_adm_force_credit;			/* 0x000028 */
+	u64	ce_adm_control;				/* 0x000030 */
+	u64	ce_adm_mmr_chn_timeout;			/* 0x000038 */
+	u64	ce_adm_ssp_ure_timeout;			/* 0x000040 */
+	u64	ce_adm_ssp_dre_timeout;			/* 0x000048 */
+	u64	ce_adm_ssp_debug_sel;			/* 0x000050 */
+	u64	ce_adm_int_status;			/* 0x000058 */
+	u64	ce_adm_int_status_alias;		/* 0x000060 */
+	u64	ce_adm_int_mask;			/* 0x000068 */
+	u64	ce_adm_int_pending;			/* 0x000070 */
+	u64	ce_adm_force_int;			/* 0x000078 */
+	u64	ce_adm_ure_ups_buf_barrier_flush;	/* 0x000080 */
+	u64	ce_adm_int_dest[15];	    /* 0x000088 -- 0x0000F8 */
+	u64	ce_adm_error_summary;			/* 0x000100 */
+	u64	ce_adm_error_summary_alias;		/* 0x000108 */
+	u64	ce_adm_error_mask;			/* 0x000110 */
+	u64	ce_adm_first_error;			/* 0x000118 */
+	u64	ce_adm_error_overflow;			/* 0x000120 */
+	u64	ce_adm_error_overflow_alias;		/* 0x000128 */
+	u64	ce_pad_000130[2];	    /* 0x000130 -- 0x000138 */
+	u64	ce_adm_tnum_error;			/* 0x000140 */
+	u64	ce_adm_mmr_err_detail;			/* 0x000148 */
+	u64	ce_adm_msg_sram_perr_detail;		/* 0x000150 */
+	u64	ce_adm_bap_sram_perr_detail;		/* 0x000158 */
+	u64	ce_adm_ce_sram_perr_detail;		/* 0x000160 */
+	u64	ce_adm_ce_credit_oflow_detail;		/* 0x000168 */
+	u64	ce_adm_tx_link_idle_max_timer;		/* 0x000170 */
+	u64	ce_adm_pcie_debug_sel;			/* 0x000178 */
+	u64	ce_pad_000180[16];	    /* 0x000180 -- 0x0001F8 */
+
+	u64	ce_adm_pcie_debug_sel_top;		/* 0x000200 */
+	u64	ce_adm_pcie_debug_lat_sel_lo_top;	/* 0x000208 */
+	u64	ce_adm_pcie_debug_lat_sel_hi_top;	/* 0x000210 */
+	u64	ce_adm_pcie_debug_trig_sel_top;		/* 0x000218 */
+	u64	ce_adm_pcie_debug_trig_lat_sel_lo_top;	/* 0x000220 */
+	u64	ce_adm_pcie_debug_trig_lat_sel_hi_top;	/* 0x000228 */
+	u64	ce_adm_pcie_trig_compare_top;		/* 0x000230 */
+	u64	ce_adm_pcie_trig_compare_en_top;	/* 0x000238 */
+	u64	ce_adm_ssp_debug_sel_top;		/* 0x000240 */
+	u64	ce_adm_ssp_debug_lat_sel_lo_top;	/* 0x000248 */
+	u64	ce_adm_ssp_debug_lat_sel_hi_top;	/* 0x000250 */
+	u64	ce_adm_ssp_debug_trig_sel_top;		/* 0x000258 */
+	u64	ce_adm_ssp_debug_trig_lat_sel_lo_top;	/* 0x000260 */
+	u64	ce_adm_ssp_debug_trig_lat_sel_hi_top;	/* 0x000268 */
+	u64	ce_adm_ssp_trig_compare_top;		/* 0x000270 */
+	u64	ce_adm_ssp_trig_compare_en_top;		/* 0x000278 */
+	u64	ce_pad_000280[48];	    /* 0x000280 -- 0x0003F8 */
+
+	u64	ce_adm_bap_ctrl;			/* 0x000400 */
+	u64	ce_pad_000408[127];	    /* 0x000408 -- 0x0007F8 */
+
+	u64	ce_msg_buf_data63_0[35];    /* 0x000800 -- 0x000918 */
+	u64	ce_pad_000920[29];	    /* 0x000920 -- 0x0009F8 */
+
+	u64	ce_msg_buf_data127_64[35];  /* 0x000A00 -- 0x000B18 */
+	u64	ce_pad_000B20[29];	    /* 0x000B20 -- 0x000BF8 */
+
+	u64	ce_msg_buf_parity[35];	    /* 0x000C00 -- 0x000D18 */
+	u64	ce_pad_000D20[29];	    /* 0x000D20 -- 0x000DF8 */
+
+	u64	ce_pad_000E00[576];	    /* 0x000E00 -- 0x001FF8 */
+
+	/*
+	 * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
+	 * Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000
+	 * NOTE: in the comment offsets at far right, let 'z' = {2 or 3}
+	 */
+	#define ce_lsi(link_num)	ce_lsi[(link_num) - 1]
+	struct ce_lsi_reg {
+		u64	ce_lsi_lpu_id;			/* 0x00z000 */
+		u64	ce_lsi_rst;			/* 0x00z008 */
+		u64	ce_lsi_dbg_stat;		/* 0x00z010 */
+		u64	ce_lsi_dbg_cfg;			/* 0x00z018 */
+		u64	ce_lsi_ltssm_ctrl;		/* 0x00z020 */
+		u64	ce_lsi_lk_stat;			/* 0x00z028 */
+		u64	ce_pad_00z030[2];   /* 0x00z030 -- 0x00z038 */
+		u64	ce_lsi_int_and_stat;		/* 0x00z040 */
+		u64	ce_lsi_int_mask;		/* 0x00z048 */
+		u64	ce_pad_00z050[22];  /* 0x00z050 -- 0x00z0F8 */
+		u64	ce_lsi_lk_perf_cnt_sel;		/* 0x00z100 */
+		u64	ce_pad_00z108;			/* 0x00z108 */
+		u64	ce_lsi_lk_perf_cnt_ctrl;	/* 0x00z110 */
+		u64	ce_pad_00z118;			/* 0x00z118 */
+		u64	ce_lsi_lk_perf_cnt1;		/* 0x00z120 */
+		u64	ce_lsi_lk_perf_cnt1_test;	/* 0x00z128 */
+		u64	ce_lsi_lk_perf_cnt2;		/* 0x00z130 */
+		u64	ce_lsi_lk_perf_cnt2_test;	/* 0x00z138 */
+		u64	ce_pad_00z140[24];  /* 0x00z140 -- 0x00z1F8 */
+		u64	ce_lsi_lk_lyr_cfg;		/* 0x00z200 */
+		u64	ce_lsi_lk_lyr_status;		/* 0x00z208 */
+		u64	ce_lsi_lk_lyr_int_stat;		/* 0x00z210 */
+		u64	ce_lsi_lk_ly_int_stat_test;	/* 0x00z218 */
+		u64	ce_lsi_lk_ly_int_stat_mask;	/* 0x00z220 */
+		u64	ce_pad_00z228[3];   /* 0x00z228 -- 0x00z238 */
+		u64	ce_lsi_fc_upd_ctl;		/* 0x00z240 */
+		u64	ce_pad_00z248[3];   /* 0x00z248 -- 0x00z258 */
+		u64	ce_lsi_flw_ctl_upd_to_timer;	/* 0x00z260 */
+		u64	ce_lsi_flw_ctl_upd_timer0;	/* 0x00z268 */
+		u64	ce_lsi_flw_ctl_upd_timer1;	/* 0x00z270 */
+		u64	ce_pad_00z278[49];  /* 0x00z278 -- 0x00z3F8 */
+		u64	ce_lsi_freq_nak_lat_thrsh;	/* 0x00z400 */
+		u64	ce_lsi_ack_nak_lat_tmr;		/* 0x00z408 */
+		u64	ce_lsi_rply_tmr_thr;		/* 0x00z410 */
+		u64	ce_lsi_rply_tmr;		/* 0x00z418 */
+		u64	ce_lsi_rply_num_stat;		/* 0x00z420 */
+		u64	ce_lsi_rty_buf_max_addr;	/* 0x00z428 */
+		u64	ce_lsi_rty_fifo_ptr;		/* 0x00z430 */
+		u64	ce_lsi_rty_fifo_rd_wr_ptr;	/* 0x00z438 */
+		u64	ce_lsi_rty_fifo_cred;		/* 0x00z440 */
+		u64	ce_lsi_seq_cnt;			/* 0x00z448 */
+		u64	ce_lsi_ack_sent_seq_num;	/* 0x00z450 */
+		u64	ce_lsi_seq_cnt_fifo_max_addr;	/* 0x00z458 */
+		u64	ce_lsi_seq_cnt_fifo_ptr;	/* 0x00z460 */
+		u64	ce_lsi_seq_cnt_rd_wr_ptr;	/* 0x00z468 */
+		u64	ce_lsi_tx_lk_ts_ctl;		/* 0x00z470 */
+		u64	ce_pad_00z478;			/* 0x00z478 */
+		u64	ce_lsi_mem_addr_ctl;		/* 0x00z480 */
+		u64	ce_lsi_mem_d_ld0;		/* 0x00z488 */
+		u64	ce_lsi_mem_d_ld1;		/* 0x00z490 */
+		u64	ce_lsi_mem_d_ld2;		/* 0x00z498 */
+		u64	ce_lsi_mem_d_ld3;		/* 0x00z4A0 */
+		u64	ce_lsi_mem_d_ld4;		/* 0x00z4A8 */
+		u64	ce_pad_00z4B0[2];   /* 0x00z4B0 -- 0x00z4B8 */
+		u64	ce_lsi_rty_d_cnt;		/* 0x00z4C0 */
+		u64	ce_lsi_seq_buf_cnt;		/* 0x00z4C8 */
+		u64	ce_lsi_seq_buf_bt_d;		/* 0x00z4D0 */
+		u64	ce_pad_00z4D8;			/* 0x00z4D8 */
+		u64	ce_lsi_ack_lat_thr;		/* 0x00z4E0 */
+		u64	ce_pad_00z4E8[3];   /* 0x00z4E8 -- 0x00z4F8 */
+		u64	ce_lsi_nxt_rcv_seq_1_cntr;	/* 0x00z500 */
+		u64	ce_lsi_unsp_dllp_rcvd;		/* 0x00z508 */
+		u64	ce_lsi_rcv_lk_ts_ctl;		/* 0x00z510 */
+		u64	ce_pad_00z518[29];  /* 0x00z518 -- 0x00z5F8 */
+		u64	ce_lsi_phy_lyr_cfg;		/* 0x00z600 */
+		u64	ce_pad_00z608;			/* 0x00z608 */
+		u64	ce_lsi_phy_lyr_int_stat;	/* 0x00z610 */
+		u64	ce_lsi_phy_lyr_int_stat_test;	/* 0x00z618 */
+		u64	ce_lsi_phy_lyr_int_mask;	/* 0x00z620 */
+		u64	ce_pad_00z628[11];  /* 0x00z628 -- 0x00z678 */
+		u64	ce_lsi_rcv_phy_cfg;		/* 0x00z680 */
+		u64	ce_lsi_rcv_phy_stat1;		/* 0x00z688 */
+		u64	ce_lsi_rcv_phy_stat2;		/* 0x00z690 */
+		u64	ce_lsi_rcv_phy_stat3;		/* 0x00z698 */
+		u64	ce_lsi_rcv_phy_int_stat;	/* 0x00z6A0 */
+		u64	ce_lsi_rcv_phy_int_stat_test;	/* 0x00z6A8 */
+		u64	ce_lsi_rcv_phy_int_mask;	/* 0x00z6B0 */
+		u64	ce_pad_00z6B8[9];   /* 0x00z6B8 -- 0x00z6F8 */
+		u64	ce_lsi_tx_phy_cfg;		/* 0x00z700 */
+		u64	ce_lsi_tx_phy_stat;		/* 0x00z708 */
+		u64	ce_lsi_tx_phy_int_stat;		/* 0x00z710 */
+		u64	ce_lsi_tx_phy_int_stat_test;	/* 0x00z718 */
+		u64	ce_lsi_tx_phy_int_mask;		/* 0x00z720 */
+		u64	ce_lsi_tx_phy_stat2;		/* 0x00z728 */
+		u64	ce_pad_00z730[10];  /* 0x00z730 -- 0x00z778 */
+		u64	ce_lsi_ltssm_cfg1;		/* 0x00z780 */
+		u64	ce_lsi_ltssm_cfg2;		/* 0x00z788 */
+		u64	ce_lsi_ltssm_cfg3;		/* 0x00z790 */
+		u64	ce_lsi_ltssm_cfg4;		/* 0x00z798 */
+		u64	ce_lsi_ltssm_cfg5;		/* 0x00z7A0 */
+		u64	ce_lsi_ltssm_stat1;		/* 0x00z7A8 */
+		u64	ce_lsi_ltssm_stat2;		/* 0x00z7B0 */
+		u64	ce_lsi_ltssm_int_stat;		/* 0x00z7B8 */
+		u64	ce_lsi_ltssm_int_stat_test;	/* 0x00z7C0 */
+		u64	ce_lsi_ltssm_int_mask;		/* 0x00z7C8 */
+		u64	ce_lsi_ltssm_stat_wr_en;	/* 0x00z7D0 */
+		u64	ce_pad_00z7D8[5];   /* 0x00z7D8 -- 0x00z7F8 */
+		u64	ce_lsi_gb_cfg1;			/* 0x00z800 */
+		u64	ce_lsi_gb_cfg2;			/* 0x00z808 */
+		u64	ce_lsi_gb_cfg3;			/* 0x00z810 */
+		u64	ce_lsi_gb_cfg4;			/* 0x00z818 */
+		u64	ce_lsi_gb_stat;			/* 0x00z820 */
+		u64	ce_lsi_gb_int_stat;		/* 0x00z828 */
+		u64	ce_lsi_gb_int_stat_test;	/* 0x00z830 */
+		u64	ce_lsi_gb_int_mask;		/* 0x00z838 */
+		u64	ce_lsi_gb_pwr_dn1;		/* 0x00z840 */
+		u64	ce_lsi_gb_pwr_dn2;		/* 0x00z848 */
+		u64	ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
+	} ce_lsi[2];
+
+	u64	ce_pad_004000[10];	    /* 0x004000 -- 0x004048 */
+
+	/*
+	 * CRM: Coretalk Receive Module Registers
+	 */
+	u64	ce_crm_debug_mux;			/* 0x004050 */
+	u64	ce_pad_004058;				/* 0x004058 */
+	u64	ce_crm_ssp_err_cmd_wrd;			/* 0x004060 */
+	u64	ce_crm_ssp_err_addr;			/* 0x004068 */
+	u64	ce_crm_ssp_err_syn;			/* 0x004070 */
+
+	u64	ce_pad_004078[499];	    /* 0x004078 -- 0x005008 */
+
+	/*
+	 * CXM: Coretalk Xmit Module Registers
+	 */
+	u64	ce_cxm_dyn_credit_status;		/* 0x005010 */
+	u64	ce_cxm_last_credit_status;		/* 0x005018 */
+	u64	ce_cxm_credit_limit;			/* 0x005020 */
+	u64	ce_cxm_force_credit;			/* 0x005028 */
+	u64	ce_cxm_disable_bypass;			/* 0x005030 */
+	u64	ce_pad_005038[3];	    /* 0x005038 -- 0x005048 */
+	u64	ce_cxm_debug_mux;			/* 0x005050 */
+
+	u64	ce_pad_005058[501];	    /* 0x005058 -- 0x005FF8 */
+
+	/*
+	 * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
+	 * DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000
+	 * DTL: in the comment offsets at far right, let 'y' = {6 or 8}
+	 *
+	 * UTL: Upstream Transaction Layer Regs (Link#1 and Link#2)
+	 * UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000
+	 * UTL: in the comment offsets at far right, let 'z' = {7 or 9}
+	 */
+	#define ce_dtl(link_num)	ce_dtl_utl[(link_num) - 1]
+	#define ce_utl(link_num)	ce_dtl_utl[(link_num) - 1]
+	struct ce_dtl_utl_reg {
+		/* DTL */
+		u64	ce_dtl_dtdr_credit_limit;	/* 0x00y000 */
+		u64	ce_dtl_dtdr_credit_force;	/* 0x00y008 */
+		u64	ce_dtl_dyn_credit_status;	/* 0x00y010 */
+		u64	ce_dtl_dtl_last_credit_stat;	/* 0x00y018 */
+		u64	ce_dtl_dtl_ctrl;		/* 0x00y020 */
+		u64	ce_pad_00y028[5];   /* 0x00y028 -- 0x00y048 */
+		u64	ce_dtl_debug_sel;		/* 0x00y050 */
+		u64	ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
+
+		/* UTL */
+		u64	ce_utl_utl_ctrl;		/* 0x00z000 */
+		u64	ce_utl_debug_sel;		/* 0x00z008 */
+		u64	ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
+	} ce_dtl_utl[2];
+
+	u64	ce_pad_00A000[514];	    /* 0x00A000 -- 0x00B008 */
+
+	/*
+	 * URE: Upstream Request Engine
+	 */
+	u64	ce_ure_dyn_credit_status;		/* 0x00B010 */
+	u64	ce_ure_last_credit_status;		/* 0x00B018 */
+	u64	ce_ure_credit_limit;			/* 0x00B020 */
+	u64	ce_pad_00B028;				/* 0x00B028 */
+	u64	ce_ure_control;				/* 0x00B030 */
+	u64	ce_ure_status;				/* 0x00B038 */
+	u64	ce_pad_00B040[2];	    /* 0x00B040 -- 0x00B048 */
+	u64	ce_ure_debug_sel;			/* 0x00B050 */
+	u64	ce_ure_pcie_debug_sel;			/* 0x00B058 */
+	u64	ce_ure_ssp_err_cmd_wrd;			/* 0x00B060 */
+	u64	ce_ure_ssp_err_addr;			/* 0x00B068 */
+	u64	ce_ure_page_map;			/* 0x00B070 */
+	u64	ce_ure_dir_map[TIOCE_NUM_PORTS];	/* 0x00B078 */
+	u64	ce_ure_pipe_sel1;			/* 0x00B088 */
+	u64	ce_ure_pipe_mask1;			/* 0x00B090 */
+	u64	ce_ure_pipe_sel2;			/* 0x00B098 */
+	u64	ce_ure_pipe_mask2;			/* 0x00B0A0 */
+	u64	ce_ure_pcie1_credits_sent;		/* 0x00B0A8 */
+	u64	ce_ure_pcie1_credits_used;		/* 0x00B0B0 */
+	u64	ce_ure_pcie1_credit_limit;		/* 0x00B0B8 */
+	u64	ce_ure_pcie2_credits_sent;		/* 0x00B0C0 */
+	u64	ce_ure_pcie2_credits_used;		/* 0x00B0C8 */
+	u64	ce_ure_pcie2_credit_limit;		/* 0x00B0D0 */
+	u64	ce_ure_pcie_force_credit;		/* 0x00B0D8 */
+	u64	ce_ure_rd_tnum_val;			/* 0x00B0E0 */
+	u64	ce_ure_rd_tnum_rsp_rcvd;		/* 0x00B0E8 */
+	u64	ce_ure_rd_tnum_esent_timer;		/* 0x00B0F0 */
+	u64	ce_ure_rd_tnum_error;			/* 0x00B0F8 */
+	u64	ce_ure_rd_tnum_first_cl;		/* 0x00B100 */
+	u64	ce_ure_rd_tnum_link_buf;		/* 0x00B108 */
+	u64	ce_ure_wr_tnum_val;			/* 0x00B110 */
+	u64	ce_ure_sram_err_addr0;			/* 0x00B118 */
+	u64	ce_ure_sram_err_addr1;			/* 0x00B120 */
+	u64	ce_ure_sram_err_addr2;			/* 0x00B128 */
+	u64	ce_ure_sram_rd_addr0;			/* 0x00B130 */
+	u64	ce_ure_sram_rd_addr1;			/* 0x00B138 */
+	u64	ce_ure_sram_rd_addr2;			/* 0x00B140 */
+	u64	ce_ure_sram_wr_addr0;			/* 0x00B148 */
+	u64	ce_ure_sram_wr_addr1;			/* 0x00B150 */
+	u64	ce_ure_sram_wr_addr2;			/* 0x00B158 */
+	u64	ce_ure_buf_flush10;			/* 0x00B160 */
+	u64	ce_ure_buf_flush11;			/* 0x00B168 */
+	u64	ce_ure_buf_flush12;			/* 0x00B170 */
+	u64	ce_ure_buf_flush13;			/* 0x00B178 */
+	u64	ce_ure_buf_flush20;			/* 0x00B180 */
+	u64	ce_ure_buf_flush21;			/* 0x00B188 */
+	u64	ce_ure_buf_flush22;			/* 0x00B190 */
+	u64	ce_ure_buf_flush23;			/* 0x00B198 */
+	u64	ce_ure_pcie_control1;			/* 0x00B1A0 */
+	u64	ce_ure_pcie_control2;			/* 0x00B1A8 */
+
+	u64	ce_pad_00B1B0[458];	    /* 0x00B1B0 -- 0x00BFF8 */
+
+	/* Upstream Data Buffer, Port1 */
+	struct ce_ure_maint_ups_dat1_data {
+		u64	data63_0[512];	    /* 0x00C000 -- 0x00CFF8 */
+		u64	data127_64[512];    /* 0x00D000 -- 0x00DFF8 */
+		u64	parity[512];	    /* 0x00E000 -- 0x00EFF8 */
+	} ce_ure_maint_ups_dat1;
+
+	/* Upstream Header Buffer, Port1 */
+	struct ce_ure_maint_ups_hdr1_data {
+		u64	data63_0[512];	    /* 0x00F000 -- 0x00FFF8 */
+		u64	data127_64[512];    /* 0x010000 -- 0x010FF8 */
+		u64	parity[512];	    /* 0x011000 -- 0x011FF8 */
+	} ce_ure_maint_ups_hdr1;
+
+	/* Upstream Data Buffer, Port2 */
+	struct ce_ure_maint_ups_dat2_data {
+		u64	data63_0[512];	    /* 0x012000 -- 0x012FF8 */
+		u64	data127_64[512];    /* 0x013000 -- 0x013FF8 */
+		u64	parity[512];	    /* 0x014000 -- 0x014FF8 */
+	} ce_ure_maint_ups_dat2;
+
+	/* Upstream Header Buffer, Port2 */
+	struct ce_ure_maint_ups_hdr2_data {
+		u64	data63_0[512];	    /* 0x015000 -- 0x015FF8 */
+		u64	data127_64[512];    /* 0x016000 -- 0x016FF8 */
+		u64	parity[512];	    /* 0x017000 -- 0x017FF8 */
+	} ce_ure_maint_ups_hdr2;
+
+	/* Downstream Data Buffer */
+	struct ce_ure_maint_dns_dat_data {
+		u64	data63_0[512];	    /* 0x018000 -- 0x018FF8 */
+		u64	data127_64[512];    /* 0x019000 -- 0x019FF8 */
+		u64	parity[512];	    /* 0x01A000 -- 0x01AFF8 */
+	} ce_ure_maint_dns_dat;
+
+	/* Downstream Header Buffer */
+	struct	ce_ure_maint_dns_hdr_data {
+		u64	data31_0[64];	    /* 0x01B000 -- 0x01B1F8 */
+		u64	data95_32[64];	    /* 0x01B200 -- 0x01B3F8 */
+		u64	parity[64];	    /* 0x01B400 -- 0x01B5F8 */
+	} ce_ure_maint_dns_hdr;
+
+	/* RCI Buffer Data */
+	struct	ce_ure_maint_rci_data {
+		u64	data41_0[64];	    /* 0x01B600 -- 0x01B7F8 */
+		u64	data69_42[64];	    /* 0x01B800 -- 0x01B9F8 */
+	} ce_ure_maint_rci;
+
+	/* Response Queue */
+	u64	ce_ure_maint_rspq[64];	    /* 0x01BA00 -- 0x01BBF8 */
+
+	u64	ce_pad_01C000[4224];	    /* 0x01BC00 -- 0x023FF8 */
+
+	/* Admin Build-a-Packet Buffer */
+	struct	ce_adm_maint_bap_buf_data {
+		u64	data63_0[258];	    /* 0x024000 -- 0x024808 */
+		u64	data127_64[258];    /* 0x024810 -- 0x025018 */
+		u64	parity[258];	    /* 0x025020 -- 0x025828 */
+	} ce_adm_maint_bap_buf;
+
+	u64	ce_pad_025830[5370];	    /* 0x025830 -- 0x02FFF8 */
+
+	/* URE: 40bit PMU ATE Buffer */		    /* 0x030000 -- 0x037FF8 */
+	u64	ce_ure_ate40[TIOCE_NUM_M40_ATES];
+
+	/* URE: 32/40bit PMU ATE Buffer */	    /* 0x038000 -- 0x03BFF8 */
+	u64	ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
+
+	u64	ce_pad_03C000[2050];	    /* 0x03C000 -- 0x040008 */
+
+	/*
+	 * DRE: Downstream Request Engine
+	 */
+	u64	ce_dre_dyn_credit_status1;		/* 0x040010 */
+	u64	ce_dre_dyn_credit_status2;		/* 0x040018 */
+	u64	ce_dre_last_credit_status1;		/* 0x040020 */
+	u64	ce_dre_last_credit_status2;		/* 0x040028 */
+	u64	ce_dre_credit_limit1;			/* 0x040030 */
+	u64	ce_dre_credit_limit2;			/* 0x040038 */
+	u64	ce_dre_force_credit1;			/* 0x040040 */
+	u64	ce_dre_force_credit2;			/* 0x040048 */
+	u64	ce_dre_debug_mux1;			/* 0x040050 */
+	u64	ce_dre_debug_mux2;			/* 0x040058 */
+	u64	ce_dre_ssp_err_cmd_wrd;			/* 0x040060 */
+	u64	ce_dre_ssp_err_addr;			/* 0x040068 */
+	u64	ce_dre_comp_err_cmd_wrd;		/* 0x040070 */
+	u64	ce_dre_comp_err_addr;			/* 0x040078 */
+	u64	ce_dre_req_status;			/* 0x040080 */
+	u64	ce_dre_config1;				/* 0x040088 */
+	u64	ce_dre_config2;				/* 0x040090 */
+	u64	ce_dre_config_req_status;		/* 0x040098 */
+	u64	ce_pad_0400A0[12];	    /* 0x0400A0 -- 0x0400F8 */
+	u64	ce_dre_dyn_fifo;			/* 0x040100 */
+	u64	ce_pad_040108[3];	    /* 0x040108 -- 0x040118 */
+	u64	ce_dre_last_fifo;			/* 0x040120 */
+
+	u64	ce_pad_040128[27];	    /* 0x040128 -- 0x0401F8 */
+
+	/* DRE Downstream Head Queue */
+	struct	ce_dre_maint_ds_head_queue {
+		u64	data63_0[32];	    /* 0x040200 -- 0x0402F8 */
+		u64	data127_64[32];	    /* 0x040300 -- 0x0403F8 */
+		u64	parity[32];	    /* 0x040400 -- 0x0404F8 */
+	} ce_dre_maint_ds_head_q;
+
+	u64	ce_pad_040500[352];	    /* 0x040500 -- 0x040FF8 */
+
+	/* DRE Downstream Data Queue */
+	struct	ce_dre_maint_ds_data_queue {
+		u64	data63_0[256];	    /* 0x041000 -- 0x0417F8 */
+		u64	ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
+		u64	data127_64[256];    /* 0x042000 -- 0x0427F8 */
+		u64	ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
+		u64	parity[256];	    /* 0x043000 -- 0x0437F8 */
+		u64	ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
+	} ce_dre_maint_ds_data_q;
+
+	/* DRE URE Upstream Response Queue */
+	struct	ce_dre_maint_ure_us_rsp_queue {
+		u64	data63_0[8];	    /* 0x044000 -- 0x044038 */
+		u64	ce_pad_044040[24];  /* 0x044040 -- 0x0440F8 */
+		u64	data127_64[8];      /* 0x044100 -- 0x044138 */
+		u64	ce_pad_044140[24];  /* 0x044140 -- 0x0441F8 */
+		u64	parity[8];	    /* 0x044200 -- 0x044238 */
+		u64	ce_pad_044240[24];  /* 0x044240 -- 0x0442F8 */
+	} ce_dre_maint_ure_us_rsp_q;
+
+	u64	ce_dre_maint_us_wrt_rsp[32]; /* 0x044300 -- 0x0443F8 */
+
+	u64	ce_end_of_struct;			/* 0x044400 */
+} tioce_t;
+
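+/*
+ * Editor's sketch: the ce_lsi() helper above maps a 1-based link number
+ * to the 0-based array index.  For a hypothetical mapped "struct tioce
+ * *ce", link #1's link status MMR (0x002028) would be read as:
+ *
+ *	u64 stat = ce->ce_lsi(1).ce_lsi_lk_stat;
+ */
+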
+/* ce_lsiX_gb_cfg1 register bit masks & shifts */
+#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT	0
+#define CE_LSI_GB_CFG1_RXL0S_THS_MASK	(0xffULL << 0)
+#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT	8
+#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK	(0xfULL << 8)
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT	12
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK	(0x7ULL << 12)
+#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT	15
+#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK	(0x1ULL << 15)
+#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT	16
+#define CE_LSI_GB_CFG1_LPBK_SEL_MASK	(0x3ULL << 16)
+#define CE_LSI_GB_CFG1_LPBK_EN_SHFT	18
+#define CE_LSI_GB_CFG1_LPBK_EN_MASK	(0x1ULL << 18)
+#define CE_LSI_GB_CFG1_RVRS_LB_SHFT	19
+#define CE_LSI_GB_CFG1_RVRS_LB_MASK	(0x1ULL << 19)
+#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT	20
+#define CE_LSI_GB_CFG1_RVRS_CLK_MASK	(0x3ULL << 20)
+#define CE_LSI_GB_CFG1_SLF_TS_SHFT	24
+#define CE_LSI_GB_CFG1_SLF_TS_MASK	(0xfULL << 24)
+
+/* ce_adm_int_mask/ce_adm_int_status register bit defines */
+#define CE_ADM_INT_CE_ERROR_SHFT		0
+#define CE_ADM_INT_LSI1_IP_ERROR_SHFT		1
+#define CE_ADM_INT_LSI2_IP_ERROR_SHFT		2
+#define CE_ADM_INT_PCIE_ERROR_SHFT		3
+#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT	4
+#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT	5
+#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT	6
+#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT	7
+#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT	8
+#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT	9
+#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT	10
+#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT	11
+#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT	12
+#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT	13
+#define CE_ADM_INT_PCIE_MSG_SHFT		14 /* see int_dest_14 */
+#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT		14
+#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT		15
+#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT		16
+#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT		17
+#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT	22
+#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT	23
+
+/* ce_adm_force_int register bit defines */
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT	0
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT	1
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT	2
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT	3
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT	4
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT	5
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT	6
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT	7
+#define CE_ADM_FORCE_INT_ALWAYS_SHFT		8
+
+/* ce_adm_int_dest register bit masks & shifts */
+#define INTR_VECTOR_SHFT			56
+
+/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
+#define CE_ADM_ERR_CRM_SSP_REQ_INVALID			(0x1ULL <<  0)
+#define CE_ADM_ERR_SSP_REQ_HEADER			(0x1ULL <<  1)
+#define CE_ADM_ERR_SSP_RSP_HEADER			(0x1ULL <<  2)
+#define CE_ADM_ERR_SSP_PROTOCOL_ERROR			(0x1ULL <<  3)
+#define CE_ADM_ERR_SSP_SBE				(0x1ULL <<  4)
+#define CE_ADM_ERR_SSP_MBE				(0x1ULL <<  5)
+#define CE_ADM_ERR_CXM_CREDIT_OFLOW			(0x1ULL <<  6)
+#define CE_ADM_ERR_DRE_SSP_REQ_INVAL			(0x1ULL <<  7)
+#define CE_ADM_ERR_SSP_REQ_LONG				(0x1ULL <<  8)
+#define CE_ADM_ERR_SSP_REQ_OFLOW			(0x1ULL <<  9)
+#define CE_ADM_ERR_SSP_REQ_SHORT			(0x1ULL << 10)
+#define CE_ADM_ERR_SSP_REQ_SIDEBAND			(0x1ULL << 11)
+#define CE_ADM_ERR_SSP_REQ_ADDR_ERR			(0x1ULL << 12)
+#define CE_ADM_ERR_SSP_REQ_BAD_BE			(0x1ULL << 13)
+#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT			(0x1ULL << 14)
+#define CE_ADM_ERR_PCIE_UNEXP_COMPL			(0x1ULL << 15)
+#define CE_ADM_ERR_PCIE_ERR_COMPL			(0x1ULL << 16)
+#define CE_ADM_ERR_DRE_CREDIT_OFLOW			(0x1ULL << 17)
+#define CE_ADM_ERR_DRE_SRAM_PE				(0x1ULL << 18)
+#define CE_ADM_ERR_SSP_RSP_INVALID			(0x1ULL << 19)
+#define CE_ADM_ERR_SSP_RSP_LONG				(0x1ULL << 20)
+#define CE_ADM_ERR_SSP_RSP_SHORT			(0x1ULL << 21)
+#define CE_ADM_ERR_SSP_RSP_SIDEBAND			(0x1ULL << 22)
+#define CE_ADM_ERR_URE_SSP_RSP_UNEXP			(0x1ULL << 23)
+#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT		(0x1ULL << 24)
+#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT		(0x1ULL << 25)
+#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT		(0x1ULL << 26)
+#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT			(0x1ULL << 27)
+#define CE_ADM_ERR_URE_CREDIT_OFLOW			(0x1ULL << 28)
+#define CE_ADM_ERR_URE_SRAM_PE				(0x1ULL << 29)
+#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP			(0x1ULL << 30)
+#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT			(0x1ULL << 31)
+#define CE_ADM_ERR_MMR_ACCESS_ERROR			(0x1ULL << 32)
+#define CE_ADM_ERR_MMR_ADDR_ERROR			(0x1ULL << 33)
+#define CE_ADM_ERR_ADM_CREDIT_OFLOW			(0x1ULL << 34)
+#define CE_ADM_ERR_ADM_SRAM_PE				(0x1ULL << 35)
+#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR		(0x1ULL << 36)
+#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR		(0x1ULL << 37)
+#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR	(0x1ULL << 38)
+#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR	(0x1ULL << 39)
+#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR		(0x1ULL << 40)
+#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR		(0x1ULL << 41)
+#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR		(0x1ULL << 42)
+#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR		(0x1ULL << 43)
+#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR		(0x1ULL << 44)
+#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR		(0x1ULL << 45)
+#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR		(0x1ULL << 46)
+#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR		(0x1ULL << 47)
+#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR	(0x1ULL << 48)
+#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR	(0x1ULL << 49)
+#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR		(0x1ULL << 50)
+#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR		(0x1ULL << 51)
+#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR		(0x1ULL << 52)
+#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR		(0x1ULL << 53)
+#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR		(0x1ULL << 54)
+#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR		(0x1ULL << 55)
+#define CE_ADM_ERR_PORT1_PCIE_COR_ERR			(0x1ULL << 56)
+#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR			(0x1ULL << 57)
+#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR			(0x1ULL << 58)
+#define CE_ADM_ERR_PORT2_PCIE_COR_ERR			(0x1ULL << 59)
+#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR			(0x1ULL << 60)
+#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR			(0x1ULL << 61)
+
+/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
+#define FLUSH_SEL_PORT1_PIPE0_SHFT	0
+#define FLUSH_SEL_PORT1_PIPE1_SHFT	4
+#define FLUSH_SEL_PORT1_PIPE2_SHFT	8
+#define FLUSH_SEL_PORT1_PIPE3_SHFT	12
+#define FLUSH_SEL_PORT2_PIPE0_SHFT	16
+#define FLUSH_SEL_PORT2_PIPE1_SHFT	20
+#define FLUSH_SEL_PORT2_PIPE2_SHFT	24
+#define FLUSH_SEL_PORT2_PIPE3_SHFT	28
+
+/* ce_dre_config1 register bit masks and shifts */
+#define CE_DRE_RO_ENABLE		(0x1ULL << 0)
+#define CE_DRE_DYN_RO_ENABLE		(0x1ULL << 1)
+#define CE_DRE_SUP_CONFIG_COMP_ERROR	(0x1ULL << 2)
+#define CE_DRE_SUP_IO_COMP_ERROR	(0x1ULL << 3)
+#define CE_DRE_ADDR_MODE_SHFT		4
+
+/* ce_dre_config_req_status register bit masks */
+#define CE_DRE_LAST_CONFIG_COMPLETION	(0x7ULL << 0)
+#define CE_DRE_DOWNSTREAM_CONFIG_ERROR	(0x1ULL << 3)
+#define CE_DRE_CONFIG_COMPLETION_VALID	(0x1ULL << 4)
+#define CE_DRE_CONFIG_REQUEST_ACTIVE	(0x1ULL << 5)
+
+/* ce_ure_control register bit masks & shifts */
+#define CE_URE_RD_MRG_ENABLE		(0x1ULL << 0)
+#define CE_URE_WRT_MRG_ENABLE1		(0x1ULL << 4)
+#define CE_URE_WRT_MRG_ENABLE2		(0x1ULL << 5)
+#define CE_URE_WRT_MRG_TIMER_SHFT	12
+#define CE_URE_WRT_MRG_TIMER_MASK	(0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
+#define CE_URE_WRT_MRG_TIMER(x)		(((u64)(x) << \
+					  CE_URE_WRT_MRG_TIMER_SHFT) & \
+					 CE_URE_WRT_MRG_TIMER_MASK)
+#define CE_URE_RSPQ_BYPASS_DISABLE	(0x1ULL << 24)
+#define CE_URE_UPS_DAT1_PAR_DISABLE	(0x1ULL << 32)
+#define CE_URE_UPS_HDR1_PAR_DISABLE	(0x1ULL << 33)
+#define CE_URE_UPS_DAT2_PAR_DISABLE	(0x1ULL << 34)
+#define CE_URE_UPS_HDR2_PAR_DISABLE	(0x1ULL << 35)
+#define CE_URE_ATE_PAR_DISABLE		(0x1ULL << 36)
+#define CE_URE_RCI_PAR_DISABLE		(0x1ULL << 37)
+#define CE_URE_RSPQ_PAR_DISABLE		(0x1ULL << 38)
+#define CE_URE_DNS_DAT_PAR_DISABLE	(0x1ULL << 39)
+#define CE_URE_DNS_HDR_PAR_DISABLE	(0x1ULL << 40)
+#define CE_URE_MALFORM_DISABLE		(0x1ULL << 44)
+#define CE_URE_UNSUP_DISABLE		(0x1ULL << 45)
+
+/* ce_ure_page_map register bit masks & shifts */
+#define CE_URE_ATE3240_ENABLE		(0x1ULL << 0)
+#define CE_URE_ATE40_ENABLE 		(0x1ULL << 1)
+#define CE_URE_PAGESIZE_SHFT		4
+#define CE_URE_PAGESIZE_MASK		(0x7ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_4K_PAGESIZE		(0x0ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_16K_PAGESIZE		(0x1ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_64K_PAGESIZE		(0x2ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_128K_PAGESIZE		(0x3ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_256K_PAGESIZE		(0x4ULL << CE_URE_PAGESIZE_SHFT)
+
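+/*
+ * Editor's sketch (settings hypothetical): a ce_ure_page_map value
+ * enabling both ATE maps with 256KB pages could be composed as
+ *
+ *	CE_URE_ATE3240_ENABLE | CE_URE_ATE40_ENABLE | CE_URE_256K_PAGESIZE
+ */
+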
+/* ce_ure_pipe_sel register bit masks & shifts */
+#define PKT_TRAFIC_SHRT			16
+#define BUS_SRC_ID_SHFT			8
+#define DEV_SRC_ID_SHFT			3
+#define FNC_SRC_ID_SHFT			0
+#define CE_URE_TC_MASK			(0x07ULL << PKT_TRAFIC_SHRT)
+#define CE_URE_BUS_MASK			(0xFFULL << BUS_SRC_ID_SHFT)
+#define CE_URE_DEV_MASK			(0x1FULL << DEV_SRC_ID_SHFT)
+#define CE_URE_FNC_MASK			(0x07ULL << FNC_SRC_ID_SHFT)
+#define CE_URE_PIPE_BUS(b)		(((u64)(b) << BUS_SRC_ID_SHFT) & \
+					 CE_URE_BUS_MASK)
+#define CE_URE_PIPE_DEV(d)		(((u64)(d) << DEV_SRC_ID_SHFT) & \
+					 CE_URE_DEV_MASK)
+#define CE_URE_PIPE_FNC(f)		(((u64)(f) << FNC_SRC_ID_SHFT) & \
+					 CE_URE_FNC_MASK)
+
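+/*
+ * Editor's sketch (bus/dev/fn values hypothetical): a pipe select entry
+ * for bus 0x40, device 2, function 0 could be composed as
+ *
+ *	CE_URE_PIPE_BUS(0x40) | CE_URE_PIPE_DEV(2) | CE_URE_PIPE_FNC(0)
+ */
+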
+#define CE_URE_SEL1_SHFT		0
+#define CE_URE_SEL2_SHFT		20
+#define CE_URE_SEL3_SHFT		40
+#define CE_URE_SEL1_MASK		(0x7FFFFULL << CE_URE_SEL1_SHFT)
+#define CE_URE_SEL2_MASK		(0x7FFFFULL << CE_URE_SEL2_SHFT)
+#define CE_URE_SEL3_MASK		(0x7FFFFULL << CE_URE_SEL3_SHFT)
+
+
+/* ce_ure_pipe_mask register bit masks & shifts */
+#define CE_URE_MASK1_SHFT		0
+#define CE_URE_MASK2_SHFT		20
+#define CE_URE_MASK3_SHFT		40
+#define CE_URE_MASK1_MASK		(0x7FFFFULL << CE_URE_MASK1_SHFT)
+#define CE_URE_MASK2_MASK		(0x7FFFFULL << CE_URE_MASK2_SHFT)
+#define CE_URE_MASK3_MASK		(0x7FFFFULL << CE_URE_MASK3_SHFT)
+
+
+/* ce_ure_pcie_control1 register bit masks & shifts */
+#define CE_URE_SI			(0x1ULL << 0)
+#define CE_URE_ELAL_SHFT		4
+#define CE_URE_ELAL_MASK		(0x7ULL << CE_URE_ELAL_SHFT)
+#define CE_URE_ELAL_SET(n)		(((u64)(n) << CE_URE_ELAL_SHFT) & \
+					 CE_URE_ELAL_MASK)
+#define CE_URE_ELAL1_SHFT		8
+#define CE_URE_ELAL1_MASK		(0x7ULL << CE_URE_ELAL1_SHFT)
+#define CE_URE_ELAL1_SET(n)		(((u64)(n) << CE_URE_ELAL1_SHFT) & \
+					 CE_URE_ELAL1_MASK)
+#define CE_URE_SCC			(0x1ULL << 12)
+#define CE_URE_PN1_SHFT			16
+#define CE_URE_PN1_MASK			(0xFFULL << CE_URE_PN1_SHFT)
+#define CE_URE_PN2_SHFT			24
+#define CE_URE_PN2_MASK			(0xFFULL << CE_URE_PN2_SHFT)
+#define CE_URE_PN1_SET(n)		(((u64)(n) << CE_URE_PN1_SHFT) & \
+					 CE_URE_PN1_MASK)
+#define CE_URE_PN2_SET(n)		(((u64)(n) << CE_URE_PN2_SHFT) & \
+					 CE_URE_PN2_MASK)
+
+/* ce_ure_pcie_control2 register bit masks & shifts */
+#define CE_URE_ABP			(0x1ULL << 0)
+#define CE_URE_PCP			(0x1ULL << 1)
+#define CE_URE_MSP			(0x1ULL << 2)
+#define CE_URE_AIP			(0x1ULL << 3)
+#define CE_URE_PIP			(0x1ULL << 4)
+#define CE_URE_HPS			(0x1ULL << 5)
+#define CE_URE_HPC			(0x1ULL << 6)
+#define CE_URE_SPLV_SHFT		7
+#define CE_URE_SPLV_MASK		(0xFFULL << CE_URE_SPLV_SHFT)
+#define CE_URE_SPLV_SET(n)		(((u64)(n) << CE_URE_SPLV_SHFT) & \
+					 CE_URE_SPLV_MASK)
+#define CE_URE_SPLS_SHFT		15
+#define CE_URE_SPLS_MASK		(0x3ULL << CE_URE_SPLS_SHFT)
+#define CE_URE_SPLS_SET(n)		(((u64)(n) << CE_URE_SPLS_SHFT) & \
+					 CE_URE_SPLS_MASK)
+#define CE_URE_PSN1_SHFT		19
+#define CE_URE_PSN1_MASK		(0x1FFFULL << CE_URE_PSN1_SHFT)
+#define CE_URE_PSN2_SHFT		32
+#define CE_URE_PSN2_MASK		(0x1FFFULL << CE_URE_PSN2_SHFT)
+#define CE_URE_PSN1_SET(n)		(((u64)(n) << CE_URE_PSN1_SHFT) & \
+					 CE_URE_PSN1_MASK)
+#define CE_URE_PSN2_SET(n)		(((u64)(n) << CE_URE_PSN2_SHFT) & \
+					 CE_URE_PSN2_MASK)
+
+/*
+ * PIO address space ranges for CE
+ */
+
+/* Local CE Registers Space */
+#define CE_PIO_MMR			0x00000000
+#define CE_PIO_MMR_LEN			0x04000000
+
+/* PCI Compatible Config Space */
+#define CE_PIO_CONFIG_SPACE		0x04000000
+#define CE_PIO_CONFIG_SPACE_LEN		0x04000000
+
+/* PCI I/O Space Alias */
+#define CE_PIO_IO_SPACE_ALIAS		0x08000000
+#define CE_PIO_IO_SPACE_ALIAS_LEN	0x08000000
+
+/* PCI Enhanced Config Space */
+#define CE_PIO_E_CONFIG_SPACE		0x10000000
+#define CE_PIO_E_CONFIG_SPACE_LEN	0x10000000
+
+/* PCI I/O Space */
+#define CE_PIO_IO_SPACE			0x100000000
+#define CE_PIO_IO_SPACE_LEN		0x100000000
+
+/* PCI MEM Space */
+#define CE_PIO_MEM_SPACE		0x200000000
+#define CE_PIO_MEM_SPACE_LEN		TIO_HWIN_SIZE
+
+
+/*
+ * CE PCI Enhanced Config Space shifts & masks
+ */
+#define CE_E_CONFIG_BUS_SHFT		20
+#define CE_E_CONFIG_BUS_MASK		(0xFF << CE_E_CONFIG_BUS_SHFT)
+#define CE_E_CONFIG_DEVICE_SHFT		15
+#define CE_E_CONFIG_DEVICE_MASK		(0x1F << CE_E_CONFIG_DEVICE_SHFT)
+#define CE_E_CONFIG_FUNC_SHFT		12
+#define CE_E_CONFIG_FUNC_MASK		(0x7  << CE_E_CONFIG_FUNC_SHFT)
+
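+/*
+ * Editor's sketch (geometry hypothetical): the enhanced config-space
+ * offset for bus 1, device 2, function 0 would be composed as
+ *
+ *	(1 << CE_E_CONFIG_BUS_SHFT) | (2 << CE_E_CONFIG_DEVICE_SHFT) |
+ *	(0 << CE_E_CONFIG_FUNC_SHFT)
+ */
+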
+#endif /* __ASM_IA64_SN_TIOCE_H__ */
diff --git a/arch/ia64/include/asm/sn/tioce_provider.h b/arch/ia64/include/asm/sn/tioce_provider.h
new file mode 100644
index 0000000..32c32f3
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioce_provider.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_CE_PROVIDER_H
+#define _ASM_IA64_SN_CE_PROVIDER_H
+
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioce.h>
+
+/*
+ * Common TIOCE structure shared between the prom and kernel
+ *
+ * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
+ * PROM VERSION.
+ */
+struct tioce_common {
+	struct pcibus_bussoft	ce_pcibus;	/* common pciio header */
+
+	u32		ce_rev;
+	u64		ce_kernel_private;
+	u64		ce_prom_private;
+};
+
+struct tioce_kernel {
+	struct tioce_common	*ce_common;
+	spinlock_t		ce_lock;
+	struct list_head	ce_dmamap_list;
+
+	u64		ce_ate40_shadow[TIOCE_NUM_M40_ATES];
+	u64		ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
+	u32		ce_ate3240_pagesize;
+
+	u8			ce_port1_secondary;
+
+	/* per-port resources */
+	struct {
+		int 		dirmap_refcnt;
+		u64	dirmap_shadow;
+	} ce_port[TIOCE_NUM_PORTS];
+};
+
+struct tioce_dmamap {
+	struct list_head	ce_dmamap_list;	/* headed by tioce_kernel */
+	u32		refcnt;
+
+	u64		nbytes;		/* # bytes mapped */
+
+	u64		ct_start;	/* coretalk start address */
+	u64		pci_start;	/* bus start address */
+
+	u64		__iomem *ate_hw; /* hw ptr of first ATE in map */
+	u64		*ate_shadow;	/* shadow ptr of first ATE */
+	u16		ate_count;	/* # ATEs in the map */
+};
+
+extern int tioce_init_provider(void);
+
+#endif  /* _ASM_IA64_SN_CE_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/tiocp.h b/arch/ia64/include/asm/sn/tiocp.h
new file mode 100644
index 0000000..e8ad0bb
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tiocp.h
@@ -0,0 +1,257 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_TIOCP_H
+#define _ASM_IA64_SN_PCI_TIOCP_H
+
+#define TIOCP_HOST_INTR_ADDR            0x003FFFFFFFFFFFFFUL
+#define TIOCP_PCI64_CMDTYPE_MEM         (0x1ull << 60)
+#define TIOCP_PCI64_CMDTYPE_MSI         (0x3ull << 60)
+
+
+/*****************************************************************************
+ ************************ TIOCP MMR structure mapping ************************
+ *****************************************************************************/
+
+struct tiocp {
+
+    /* 0x000000-0x00FFFF -- Local Registers */
+
+    /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
+    u64		cp_id;				/* 0x000000 */
+    u64		cp_stat;			/* 0x000008 */
+    u64		cp_err_upper;			/* 0x000010 */
+    u64		cp_err_lower;			/* 0x000018 */
+    #define cp_err cp_err_lower
+    u64		cp_control;			/* 0x000020 */
+    u64		cp_req_timeout;			/* 0x000028 */
+    u64		cp_intr_upper;			/* 0x000030 */
+    u64		cp_intr_lower;			/* 0x000038 */
+    #define cp_intr cp_intr_lower
+    u64		cp_err_cmdword;			/* 0x000040 */
+    u64		_pad_000048;			/* 0x000048 */
+    u64		cp_tflush;			/* 0x000050 */
+
+    /* 0x000058-0x00007F -- Bridge-specific Configuration */
+    u64		cp_aux_err;			/* 0x000058 */
+    u64		cp_resp_upper;			/* 0x000060 */
+    u64		cp_resp_lower;			/* 0x000068 */
+    #define cp_resp cp_resp_lower
+    u64		cp_tst_pin_ctrl;		/* 0x000070 */
+    u64		cp_addr_lkerr;			/* 0x000078 */
+
+    /* 0x000080-0x00008F -- PMU & MAP */
+    u64		cp_dir_map;			/* 0x000080 */
+    u64		_pad_000088;			/* 0x000088 */
+
+    /* 0x000090-0x00009F -- SSRAM */
+    u64		cp_map_fault;			/* 0x000090 */
+    u64		_pad_000098;			/* 0x000098 */
+
+    /* 0x0000A0-0x0000AF -- Arbitration */
+    u64		cp_arb;				/* 0x0000A0 */
+    u64		_pad_0000A8;			/* 0x0000A8 */
+
+    /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+    u64		cp_ate_parity_err;		/* 0x0000B0 */
+    u64		_pad_0000B8;			/* 0x0000B8 */
+
+    /* 0x0000C0-0x0000FF -- PCI/GIO */
+    u64		cp_bus_timeout;			/* 0x0000C0 */
+    u64		cp_pci_cfg;			/* 0x0000C8 */
+    u64		cp_pci_err_upper;		/* 0x0000D0 */
+    u64		cp_pci_err_lower;		/* 0x0000D8 */
+    #define cp_pci_err cp_pci_err_lower
+    u64		_pad_0000E0[4];			/* 0x0000{E0..F8} */
+
+    /* 0x000100-0x0001FF -- Interrupt */
+    u64		cp_int_status;			/* 0x000100 */
+    u64		cp_int_enable;			/* 0x000108 */
+    u64		cp_int_rst_stat;		/* 0x000110 */
+    u64		cp_int_mode;			/* 0x000118 */
+    u64		cp_int_device;			/* 0x000120 */
+    u64		cp_int_host_err;		/* 0x000128 */
+    u64		cp_int_addr[8];			/* 0x0001{30,,,68} */
+    u64		cp_err_int_view;		/* 0x000170 */
+    u64		cp_mult_int;			/* 0x000178 */
+    u64		cp_force_always[8];		/* 0x0001{80,,,B8} */
+    u64		cp_force_pin[8];		/* 0x0001{C0,,,F8} */
+
+    /* 0x000200-0x000298 -- Device */
+    u64		cp_device[4];			/* 0x0002{00,,,18} */
+    u64		_pad_000220[4];			/* 0x0002{20,,,38} */
+    u64		cp_wr_req_buf[4];		/* 0x0002{40,,,58} */
+    u64		_pad_000260[4];			/* 0x0002{60,,,78} */
+    u64		cp_rrb_map[2];			/* 0x0002{80,,,88} */
+    #define cp_even_resp cp_rrb_map[0]			/* 0x000280 */
+    #define cp_odd_resp  cp_rrb_map[1]			/* 0x000288 */
+    u64		cp_resp_status;			/* 0x000290 */
+    u64		cp_resp_clear;			/* 0x000298 */
+
+    u64		_pad_0002A0[12];		/* 0x0002{A0..F8} */
+
+    /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+    struct {
+	u64	upper;				/* 0x0003{00,,,F0} */
+	u64	lower;				/* 0x0003{08,,,F8} */
+    } cp_buf_addr_match[16];
+
+    /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+    struct {
+	u64	flush_w_touch;			/* 0x000{400,,,5C0} */
+	u64	flush_wo_touch;			/* 0x000{408,,,5C8} */
+	u64	inflight;			/* 0x000{410,,,5D0} */
+	u64	prefetch;			/* 0x000{418,,,5D8} */
+	u64	total_pci_retry;		/* 0x000{420,,,5E0} */
+	u64	max_pci_retry;			/* 0x000{428,,,5E8} */
+	u64	max_latency;			/* 0x000{430,,,5F0} */
+	u64	clear_all;			/* 0x000{438,,,5F8} */
+    } cp_buf_count[8];
+
+
+    /* 0x000600-0x0009FF -- PCI/X registers */
+    u64		cp_pcix_bus_err_addr;		/* 0x000600 */
+    u64		cp_pcix_bus_err_attr;		/* 0x000608 */
+    u64		cp_pcix_bus_err_data;		/* 0x000610 */
+    u64		cp_pcix_pio_split_addr;		/* 0x000618 */
+    u64		cp_pcix_pio_split_attr;		/* 0x000620 */
+    u64		cp_pcix_dma_req_err_attr;	/* 0x000628 */
+    u64		cp_pcix_dma_req_err_addr;	/* 0x000630 */
+    u64		cp_pcix_timeout;		/* 0x000638 */
+
+    u64		_pad_000640[24];		/* 0x000{640,,,6F8} */
+
+    /* 0x000700-0x000737 -- Debug Registers */
+    u64		cp_ct_debug_ctl;		/* 0x000700 */
+    u64		cp_br_debug_ctl;		/* 0x000708 */
+    u64		cp_mux3_debug_ctl;		/* 0x000710 */
+    u64		cp_mux4_debug_ctl;		/* 0x000718 */
+    u64		cp_mux5_debug_ctl;		/* 0x000720 */
+    u64		cp_mux6_debug_ctl;		/* 0x000728 */
+    u64		cp_mux7_debug_ctl;		/* 0x000730 */
+
+    u64		_pad_000738[89];		/* 0x000{738,,,9F8} */
+
+    /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+    struct {
+	u64	cp_buf_addr;			/* 0x000{A00,,,AF0} */
+	u64	cp_buf_attr;			/* 0x000{A08,,,AF8} */
+    } cp_pcix_read_buf_64[16];
+
+    struct {
+	u64	cp_buf_addr;			/* 0x000{B00,,,BE0} */
+	u64	cp_buf_attr;			/* 0x000{B08,,,BE8} */
+	u64	cp_buf_valid;			/* 0x000{B10,,,BF0} */
+	u64	__pad1;				/* 0x000{B18,,,BF8} */
+    } cp_pcix_write_buf_64[8];
+
+    /* End of Local Registers -- Start of Address Map space */
+
+    char	_pad_000c00[0x010000 - 0x000c00];
+
+    /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
+    u64		cp_int_ate_ram[1024];		/* 0x010000-0x011FF8 */
+
+    char	_pad_012000[0x14000 - 0x012000];
+
+    /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
+    u64		cp_int_ate_ram_mp[1024];	/* 0x014000-0x015FF8 */
+
+    char	_pad_016000[0x18000 - 0x016000];
+
+    /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
+    u64		cp_wr_req_lower[256];		/* 0x18000 - 0x187F8 */
+    u64		cp_wr_req_upper[256];		/* 0x18800 - 0x18FF8 */
+    u64		cp_wr_req_parity[256];		/* 0x19000 - 0x197F8 */
+
+    char	_pad_019800[0x1C000 - 0x019800];
+
+    /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
+    u64		cp_rd_resp_lower[512];		/* 0x1C000 - 0x1CFF8 */
+    u64		cp_rd_resp_upper[512];		/* 0x1D000 - 0x1DFF8 */
+    u64		cp_rd_resp_parity[512];		/* 0x1E000 - 0x1EFF8 */
+
+    char	_pad_01F000[0x20000 - 0x01F000];
+
+    /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used)  */
+    char	_pad_020000[0x021000 - 0x20000];
+
+    /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
+    union {
+	u8	c[0x1000 / 1];			/* 0x02{0000,,,7FFF} */
+	u16	s[0x1000 / 2];			/* 0x02{0000,,,7FFF} */
+	u32	l[0x1000 / 4];			/* 0x02{0000,,,7FFF} */
+	u64	d[0x1000 / 8];			/* 0x02{0000,,,7FFF} */
+	union {
+	    u8	c[0x100 / 1];
+	    u16	s[0x100 / 2];
+	    u32	l[0x100 / 4];
+	    u64	d[0x100 / 8];
+	} f[8];
+    } cp_type0_cfg_dev[7];				/* 0x02{1000,,,7FFF} */
+
+    /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+    union {
+	u8	c[0x1000 / 1];			/* 0x028000-0x029000 */
+	u16	s[0x1000 / 2];			/* 0x028000-0x029000 */
+	u32	l[0x1000 / 4];			/* 0x028000-0x029000 */
+	u64	d[0x1000 / 8];			/* 0x028000-0x029000 */
+	union {
+	    u8	c[0x100 / 1];
+	    u16	s[0x100 / 2];
+	    u32	l[0x100 / 4];
+	    u64	d[0x100 / 8];
+	} f[8];
+    } cp_type1_cfg;					/* 0x028000-0x029000 */
+
+    char		_pad_029000[0x030000-0x029000];
+
+    /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+    union {
+	u8	c[8 / 1];
+	u16	s[8 / 2];
+	u32	l[8 / 4];
+	u64	d[8 / 8];
+    } cp_pci_iack;					/* 0x030000-0x030007 */
+
+    char		_pad_030007[0x040000-0x030008];
+
+    /* 0x040000-0x040007 -- PCIX Special Cycle */
+    union {
+	u8	c[8 / 1];
+	u16	s[8 / 2];
+	u32	l[8 / 4];
+	u64	d[8 / 8];
+    } cp_pcix_cycle;					/* 0x040000-0x040007 */
+
+    char		_pad_040007[0x200000-0x040008];
+
+    /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
+    union {
+	u8	c[0x100000 / 1];
+	u16	s[0x100000 / 2];
+	u32	l[0x100000 / 4];
+	u64	d[0x100000 / 8];
+    } cp_devio_raw[6];					/* 0x200000-0x7FFFFF */
+
+    #define cp_devio(n)  cp_devio_raw[((n) < 2) ? ((n) * 2) : ((n) + 2)]
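+    /*
+     * Editor's note: devices 0..3 therefore select raw spaces 0, 2, 4
+     * and 5, e.g. cp_devio(1) is cp_devio_raw[2].
+     */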
+
+    char		_pad_800000[0xA00000-0x800000];
+
+    /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush  */
+    union {
+	u8	c[0x100000 / 1];
+	u16	s[0x100000 / 2];
+	u32	l[0x100000 / 4];
+	u64	d[0x100000 / 8];
+    } cp_devio_raw_flush[6];				/* 0xA00000-0xBFFFFF */
+
+    #define cp_devio_flush(n)  cp_devio_raw_flush[((n) < 2) ? ((n) * 2) : ((n) + 2)]
+
+};
+
+#endif	/* _ASM_IA64_SN_PCI_TIOCP_H */
diff --git a/arch/ia64/include/asm/sn/tiocx.h b/arch/ia64/include/asm/sn/tiocx.h
new file mode 100644
index 0000000..d297284
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tiocx.h
@@ -0,0 +1,72 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_TIOCX_H
+#define _ASM_IA64_SN_TIO_TIOCX_H
+
+#ifdef __KERNEL__
+
+struct cx_id_s {
+	unsigned int part_num;
+	unsigned int mfg_num;
+	int nasid;
+};
+
+struct cx_dev {
+	struct cx_id_s cx_id;
+	int bt;				/* board/blade type */
+	void *soft;			/* driver specific */
+	struct hubdev_info *hubdev;
+	struct device dev;
+	struct cx_drv *driver;
+};
+
+struct cx_device_id {
+	unsigned int part_num;
+	unsigned int mfg_num;
+};
+
+struct cx_drv {
+	char *name;
+	const struct cx_device_id *id_table;
+	struct device_driver driver;
+	int (*probe) (struct cx_dev * dev, const struct cx_device_id * id);
+	int (*remove) (struct cx_dev * dev);
+};
+
+/* create DMA address by stripping AS bits */
+#define TIOCX_DMA_ADDR(a) (u64)((u64)(a) & 0xffffcfffffffffUL)
+
+#define TIOCX_TO_TIOCX_DMA_ADDR(a) (u64)(((u64)(a) & 0xfffffffff) | \
+				   ((((u64)(a)) & 0xffffc000000000UL) << 2))
+
+#define TIO_CE_ASIC_PARTNUM 0xce00
+#define TIOCX_CORELET 3
+
+/* These are taken from tio_mmr_as.h */
+#define TIO_ICE_FRZ_CFG               TIO_MMR_ADDR_MOD(0x00000000b0008100UL)
+#define TIO_ICE_PMI_TX_CFG            TIO_MMR_ADDR_MOD(0x00000000b000b100UL)
+#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL)
+#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL
+
+#define to_cx_dev(n) container_of(n, struct cx_dev, dev)
+#define to_cx_driver(drv) container_of(drv, struct cx_drv, driver)
+
+extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int);
+extern void tiocx_irq_free(struct sn_irq_info *);
+extern int cx_device_unregister(struct cx_dev *);
+extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int);
+extern int cx_driver_unregister(struct cx_drv *);
+extern int cx_driver_register(struct cx_drv *);
+extern u64 tiocx_dma_addr(u64 addr);
+extern u64 tiocx_swin_base(int nasid);
+extern void tiocx_mmr_store(int nasid, u64 offset, u64 value);
+extern u64 tiocx_mmr_load(int nasid, u64 offset);
+
+#endif				/* __KERNEL__ */
+#endif				/* _ASM_IA64_SN_TIO_TIOCX_H */
diff --git a/arch/ia64/include/asm/sn/types.h b/arch/ia64/include/asm/sn/types.h
new file mode 100644
index 0000000..8e04ee2
--- /dev/null
+++ b/arch/ia64/include/asm/sn/types.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_TYPES_H
+#define _ASM_IA64_SN_TYPES_H
+
+#include <linux/types.h>
+
+typedef unsigned long 	cpuid_t;
+typedef signed short	nasid_t;	/* node id in numa-as-id space */
+typedef signed char	partid_t;	/* partition ID type */
+typedef unsigned int    moduleid_t;     /* user-visible module number type */
+typedef unsigned int    cmoduleid_t;    /* kernel compact module id type */
+typedef unsigned char	slotid_t;	/* slot (blade) within module */
+typedef unsigned char	slabid_t;	/* slab (asic) within slot */
+typedef u64 nic_t;
+typedef unsigned long iopaddr_t;
+typedef unsigned long paddr_t;
+typedef short cnodeid_t;
+
+#endif /* _ASM_IA64_SN_TYPES_H */
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
new file mode 100644
index 0000000..67a7c40
--- /dev/null
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_SPARSEMEM_H
+#define _ASM_IA64_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS            2^N: how big each section will be
+ * MAX_PHYSMEM_BITS             2^N: how much memory we can have in that space
+ */
+
+#define SECTION_SIZE_BITS	(30)
+#define MAX_PHYSMEM_BITS	(50)
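+
+/*
+ * Editor's note: with these defaults each memory section spans
+ * 2^30 bytes (1GB) and up to 2^50 bytes (1PB) are addressable,
+ * i.e. at most 2^20 sections.
+ */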
+#ifdef CONFIG_FORCE_MAX_ZONEORDER
+#if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#undef SECTION_SIZE_BITS
+#define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
+#endif
+#endif
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _ASM_IA64_SPARSEMEM_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
new file mode 100644
index 0000000..45698cd
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock.h
@@ -0,0 +1,296 @@
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <linux/atomic.h>
+#include <asm/intrinsics.h>
+
+#define arch_spin_lock_init(x)			((x)->lock = 0)
+
+/*
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourself to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
+ *
+ *   31             17  16    15  14                    0
+ *  +----------------------------------------------------+
+ *  |  now_serving     | padding |   next_ticket         |
+ *  +----------------------------------------------------+
+ */
+
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define	TICKET_MASK	((1 << TICKET_BITS) - 1)
+
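+/*
+ * Worked example (editor's note): lock->lock == 0x00020001 decodes as
+ * now_serving == 1 and next_ticket == 1; head equals tail, so the lock
+ * is free.  A value of 0x00000001 (head 0, tail 1) is held with no
+ * waiters queued behind the owner.
+ */
+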
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket, serve;
+
+	ticket = ia64_fetchadd(1, p, acq);
+
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+		return;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->lock);
+
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
+	return 0;
+}
+
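+/*
+ * Editor's note on the unlock below: the upper halfword of the lock word
+ * holds pad bit 16 plus now_serving in bits 17..31, so adding 2 advances
+ * now_serving by one while "& ~1" keeps the pad bit clear.
+ */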
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+{
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
+
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+}
+
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+{
+	long tmp = ACCESS_ONCE(lock->lock);
+
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
+}
+
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+{
+	long tmp = ACCESS_ONCE(lock->lock);
+
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return __ticket_spin_is_locked(lock);
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	return __ticket_spin_is_contended(lock);
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	__ticket_spin_lock(lock);
+}
+
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	__ticket_spin_unlock(lock);
+}
+
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+						  unsigned long flags)
+{
+	arch_spin_lock(lock);
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	__ticket_spin_unlock_wait(lock);
+}
+
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+
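+/*
+ * Editor's note: a held write lock sets bit 31 of the rwlock word (see
+ * arch_rwlock_t), making it negative as a signed int; readers only
+ * increment the low 31-bit counter.  Hence the ">= 0" and "== 0" tests
+ * above.
+ */
+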
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1,%2\n"
+		"br.few 3f\n"
+		"1:\n"
+		"fetchadd4.rel r2 = [%0], -1;;\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"fetchadd4.acq r2 = [%0], 1;;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 1b\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "p6", "p7", "r2", "memory");
+}
+
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+
+#define arch_read_lock(rw)								\
+do {											\
+	arch_rwlock_t *__read_lock_ptr = (rw);						\
+											\
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
+		while (*(volatile int *)__read_lock_ptr < 0)				\
+			cpu_relax();							\
+	}										\
+} while (0)
+
+#endif /* !ASM_SUPPORTED */
+
+#define arch_read_unlock(rw)					\
+do {								\
+	arch_rwlock_t *__read_lock_ptr = (rw);			\
+	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
+} while (0)
+
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1, %2\n"
+		"mov ar.ccv = r0\n"
+		"dep r29 = -1, r0, 31, 1\n"
+		"br.few 3f;;\n"
+		"1:\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 1b;;\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
+
+#define arch_write_trylock(rw)							\
+({										\
+	register long result;							\
+										\
+	__asm__ __volatile__ (							\
+		"mov ar.ccv = r0\n"						\
+		"dep r29 = -1, r0, 31, 1;;\n"					\
+		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
+		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
+	(result == 0);								\
+})
+
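+/*
+ * The write lock is bit 31, i.e. the most significant bit of the
+ * little-endian lock word, so it can be dropped with a single st1.rel
+ * of zero to byte 3; no readers can be present while the writer holds
+ * the lock, so clobbering the rest of that byte is harmless.
+ */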
+static inline void arch_write_unlock(arch_rwlock_t *x)
+{
+	u8 *y = (u8 *)x;
+	barrier();
+	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
+#else /* !ASM_SUPPORTED */
+
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
+
+#define arch_write_lock(l)								\
+({											\
+	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
+	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
+	do {										\
+		while (*ia64_write_lock_ptr)						\
+			ia64_barrier();							\
+		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
+	} while (ia64_val);								\
+})
+
+#define arch_write_trylock(rw)						\
+({									\
+	__u64 ia64_val;							\
+	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
+	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
+	(ia64_val == 0);						\
+})
+
+static inline void arch_write_unlock(arch_rwlock_t *x)
+{
+	barrier();
+	x->write_lock = 0;
+}
+
+#endif /* !ASM_SUPPORTED */
+
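+/*
+ * Atomically take a reader reference, but only if no writer is
+ * involved: snapshot the lock word, force the writer bit of both the
+ * expected and the new value to zero, bump the reader count, and let
+ * cmpxchg fail the attempt if the word changed in the meantime.
+ */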
+static inline int arch_read_trylock(arch_rwlock_t *x)
+{
+	union {
+		arch_rwlock_t lock;
+		__u32 word;
+	} old, new;
+	old.lock = new.lock = *x;
+	old.lock.write_lock = new.lock.write_lock = 0;
+	++new.lock.read_counter;
+	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
+}
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /*  _ASM_IA64_SPINLOCK_H */
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
new file mode 100644
index 0000000..e2b42a5
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int read_counter	: 31;
+	volatile unsigned int write_lock	:  1;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0, 0 }
+
+#endif
diff --git a/arch/ia64/include/asm/string.h b/arch/ia64/include/asm/string.h
new file mode 100644
index 0000000..85fd65c
--- /dev/null
+++ b/arch/ia64/include/asm/string.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_STRING_H
+#define _ASM_IA64_STRING_H
+
+/*
+ * Here is where we want to put optimized versions of the string
+ * routines.
+ *
+ * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define __HAVE_ARCH_STRLEN	1 /* see arch/ia64/lib/strlen.S */
+#define __HAVE_ARCH_MEMSET	1 /* see arch/ia64/lib/memset.S */
+#define __HAVE_ARCH_MEMCPY	1 /* see arch/ia64/lib/memcpy.S */
+
+extern __kernel_size_t strlen (const char *);
+extern void *memcpy (void *, const void *, __kernel_size_t);
+extern void *memset (void *, int, __kernel_size_t);
+
+#endif /* _ASM_IA64_STRING_H */
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
new file mode 100644
index 0000000..f0acde6
--- /dev/null
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -0,0 +1,17 @@
+#ifndef ASM_IA64__SWIOTLB_H
+#define ASM_IA64__SWIOTLB_H
+
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+
+#ifdef CONFIG_SWIOTLB
+extern int swiotlb;
+extern void pci_swiotlb_init(void);
+#else
+#define swiotlb 0
+static inline void pci_swiotlb_init(void)
+{
+}
+#endif
+
+#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
new file mode 100644
index 0000000..e8f3585
--- /dev/null
+++ b/arch/ia64/include/asm/switch_to.h
@@ -0,0 +1,79 @@
+/*
+ * Low-level task switching. This is based on information published in
+ * the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_SWITCH_TO_H
+#define _ASM_IA64_SWITCH_TO_H
+
+#include <linux/percpu.h>
+
+struct task_struct;
+
+/*
+ * Context switch from one thread to another.  If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#ifdef CONFIG_PERFMON
+  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
+#else
+# define PERFMON_IS_SYSWIDE() (0)
+#endif
+
+#define IA64_HAS_EXTRA_STATE(t)							\
+	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
+	 || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {							 \
+	if (IA64_HAS_EXTRA_STATE(prev))								 \
+		ia64_save_extra(prev);								 \
+	if (IA64_HAS_EXTRA_STATE(next))								 \
+		ia64_load_extra(next);								 \
+	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
+	(last) = ia64_switch_to((next));							 \
+} while (0)
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.  In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do {						\
+	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {				\
+		ia64_psr(task_pt_regs(prev))->mfh = 0;			\
+		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
+		__ia64_save_fpu((prev)->thread.fph);				\
+	}									\
+	__switch_to(prev, next, last);						\
+	/* "next" in old context is "current" in new context */			\
+	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	       \
+		     (task_cpu(current) !=				       \
+		      		      task_thread_info(current)->last_cpu))) { \
+		platform_migrate(current);				       \
+		task_thread_info(current)->last_cpu = task_cpu(current);       \
+	}								       \
+} while (0)
+#else
+# define switch_to(prev,next,last)	__switch_to(prev, next, last)
+#endif
+
+#endif /* _ASM_IA64_SWITCH_TO_H */
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 0000000..1d0b875
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,88 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Intel Corp.  Shaohua Li <shaohua.li@intel.com>
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	if ((long)regs->cr_ifs < 0) /* Not a syscall */
+		return -1;
+
+	return regs->r15;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	/* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	return regs->r10 == -1 ? regs->r8 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->r8;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	if (error) {
+		/* error < 0, but ia64 uses > 0 return value */
+		regs->r8 = -error;
+		regs->r10 = -1;
+	} else {
+		regs->r8 = val;
+		regs->r10 = 0;
+	}
+}
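+
+/*
+ * For example, failing a call with -EINVAL ends up as r8 = 22 (EINVAL)
+ * with r10 = -1, while a success returning 0 leaves r8 = 0 and
+ * r10 = 0 -- exactly the convention syscall_get_error() tests above.
+ */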
+
+extern void ia64_syscall_get_set_arguments(struct task_struct *task,
+	struct pt_regs *regs, unsigned int i, unsigned int n,
+	unsigned long *args, int rw);
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+}
+
+static inline int syscall_get_arch(void)
+{
+	return AUDIT_ARCH_IA64;
+}
+#endif	/* _ASM_SYSCALL_H */
diff --git a/arch/ia64/include/asm/termios.h b/arch/ia64/include/asm/termios.h
new file mode 100644
index 0000000..a42f870
--- /dev/null
+++ b/arch/ia64/include/asm/termios.h
@@ -0,0 +1,57 @@
+/*
+ * Modified 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28	Added N_IRDA and N_SMSBLOCK
+ */
+#ifndef _ASM_IA64_TERMIOS_H
+#define _ASM_IA64_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+
+/*	intr=^C		quit=^\		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) {	\
+	unsigned short __tmp;				\
+	get_user(__tmp,&(termio)->x);			\
+	*(unsigned short *) &(termios)->x = __tmp;	\
+}
+
+#define user_termio_to_kernel_termios(termios, termio)		\
+({								\
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);		\
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);		\
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);		\
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);		\
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC);	\
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios)		\
+({								\
+	put_user((termios)->c_iflag, &(termio)->c_iflag);	\
+	put_user((termios)->c_oflag, &(termio)->c_oflag);	\
+	put_user((termios)->c_cflag, &(termio)->c_cflag);	\
+	put_user((termios)->c_lflag, &(termio)->c_lflag);	\
+	put_user((termios)->c_line,  &(termio)->c_line);	\
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);	\
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* _ASM_IA64_TERMIOS_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
new file mode 100644
index 0000000..aa995b6
--- /dev/null
+++ b/arch/ia64/include/asm/thread_info.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_THREAD_INFO_H
+#define _ASM_IA64_THREAD_INFO_H
+
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * On IA-64, we want to keep the task structure and kernel stack together, so they can be
+ * mapped by a single TLB entry and so they can be addressed by the "current" pointer
+ * without having to do pointer masking.
+ */
+struct thread_info {
+	struct task_struct *task;	/* XXX not really needed, except for dup_task_struct() */
+	__u32 flags;			/* thread_info flags (see TIF_*) */
+	__u32 cpu;			/* current CPU */
+	__u32 last_cpu;			/* Last CPU thread ran on */
+	__u32 status;			/* Thread synchronous flags */
+	mm_segment_t addr_limit;	/* user-level address space limit */
+	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	__u64 ac_stamp;
+	__u64 ac_leave;
+	__u64 ac_stime;
+	__u64 ac_utime;
+#endif
+};
+
+#define THREAD_SIZE			KERNEL_STACK_SIZE
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.addr_limit	= KERNEL_DS,		\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+}
+
+#ifndef ASM_OFFSETS_C
+/* how to get the thread information struct from C */
+#define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define alloc_thread_info_node(tsk, node)	\
+		((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#else
+#define current_thread_info()	((struct thread_info *) 0)
+#define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
+#define task_thread_info(tsk)	((struct thread_info *) 0)
+#endif
+#define free_thread_info(ti)	/* nothing */
+#define task_stack_page(tsk)	((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define setup_thread_stack(p, org)			\
+	*task_thread_info(p) = *task_thread_info(org);	\
+	task_thread_info(p)->ac_stime = 0;		\
+	task_thread_info(p)->ac_utime = 0;		\
+	task_thread_info(p)->task = (p);
+#else
+#define setup_thread_stack(p, org) \
+	*task_thread_info(p) = *task_thread_info(org); \
+	task_thread_info(p)->task = (p);
+#endif
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
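+
+/*
+ * Illustrative layout of the single per-task allocation (the real
+ * offsets come from asm-offsets.h):
+ *
+ *	(void *)(tsk) ................. struct task_struct, task_stack_page()
+ *	(void *)(tsk) + IA64_TASK_SIZE  struct thread_info, task_thread_info()
+ *	(void *)(tsk) + IA64_RBS_OFFSET register backing store, end_of_stack()
+ */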
+
+#define alloc_task_struct_node(node)						\
+({										\
+	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
+					     KERNEL_STACK_SIZE_ORDER);		\
+	struct task_struct *ret = page ? page_address(page) : NULL;		\
+										\
+	ret;									\
+})
+#define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in least-significant 16 bits, other flags
+ *   in top 16 bits
+ */
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
+#define TIF_SYSCALL_TRACE	2	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
+#define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
+#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
+#define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
+#define TIF_POLLING_NRFLAG	22	/* idle is polling for TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
+#define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
+#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+
+/* "work to do on user-return" bits */
+#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
+				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
+/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
+#define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
+
+#define TS_RESTORE_SIGMASK	2	/* restore signal mask in do_signal() */
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK	1
+static inline void set_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	ti->status |= TS_RESTORE_SIGMASK;
+	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
+}
+#endif	/* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
new file mode 100644
index 0000000..86c7db8
--- /dev/null
+++ b/arch/ia64/include/asm/timex.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_IA64_TIMEX_H
+#define _ASM_IA64_TIMEX_H
+
+/*
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * 2001/01/18 davidm	Removed CLOCK_TICK_RATE.  It makes no sense on IA-64.
+ *			Also removed cacheflush_time as it's entirely unused.
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+typedef unsigned long cycles_t;
+
+extern void (*ia64_udelay)(unsigned long usecs);
+
+/*
+ * For performance reasons, we don't want to define CLOCK_TICK_RATE as
+ * local_cpu_data->itc_rate.  Fortunately, we don't have to, either: according to George
+ * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock.  The time
+ * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
+ * If the calculation shows that your CLOCK_TICK_RATE cannot supply exactly 1/HZ ticks,
+ * the actual value is calculated and used to update the wall clock each jiffy.  Setting
+ * the CLOCK_TICK_RATE to x*HZ ensures that the calculation will find no errors.  Hence we
+ * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
+ * 100MHz.
+ */
+#define CLOCK_TICK_RATE		(HZ * 100000UL)
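+
+/*
+ * For example, with HZ = 1024 this gives CLOCK_TICK_RATE = 102,400,000
+ * (i.e. roughly the 100MHz mentioned above), and every 1/HZ tick
+ * corresponds to exactly 100000 of these virtual clock units, so the
+ * correction term is always zero.
+ */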
+
+static inline cycles_t
+get_cycles (void)
+{
+	cycles_t ret;
+
+	ret = ia64_getreg(_IA64_REG_AR_ITC);
+	return ret;
+}
+
+extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);
+
+#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
new file mode 100644
index 0000000..39d64e0
--- /dev/null
+++ b/arch/ia64/include/asm/tlb.h
@@ -0,0 +1,283 @@
+#ifndef _ASM_IA64_TLB_H
+#define _ASM_IA64_TLB_H
+/*
+ * Based on <asm-generic/tlb.h>.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * Removing a translation from a page table (including TLB-shootdown) is a four-step
+ * procedure:
+ *
+ *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
+ *	    (this is a no-op on ia64).
+ *	(2) Clear the relevant portions of the page-table
+ *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
+ *	(4) Release the pages that were freed up in step (2).
+ *
+ * Note that the ordering of these steps is crucial to avoid races on MP machines.
+ *
+ * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
+ * unmapping a portion of the virtual address space, these hooks are called according to
+ * the following template:
+ *
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
+ *	{
+ *	  for each vma that needs a shootdown do {
+ *	    tlb_start_vma(tlb, vma);
+ *	      for each page-table-entry PTE that needs to be removed do {
+ *		tlb_remove_tlb_entry(tlb, pte, address);
+ *		if (pte refers to a normal page) {
+ *		  tlb_remove_page(tlb, page);
+ *		}
+ *	      }
+ *	    tlb_end_vma(tlb, vma);
+ *	  }
+ *	}
+ *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
+ */
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/machvec.h>
+
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define	IA64_GATHER_BUNDLE	8
+
+struct mmu_gather {
+	struct mm_struct	*mm;
+	unsigned int		nr;
+	unsigned int		max;
+	unsigned char		fullmm;		/* non-zero means full mm flush */
+	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
+	unsigned long		start_addr;
+	unsigned long		end_addr;
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
+};
+
+struct ia64_tr_entry {
+	u64 ifa;
+	u64 itir;
+	u64 pte;
+	u64 rr;
+}; /* record for a translation register (TR) entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ * Region register macros
+ */
+#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK	0x0000000000000001L
+#define RR_VE_SHIFT	0
+#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK	0x00000000000000fcL
+#define RR_PS_SHIFT	2
+#define RR_RID_MASK	0x00000000ffffff00L
+#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
+
+static inline void
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	tlb->need_flush = 0;
+
+	if (tlb->fullmm) {
+		/*
+		 * Tearing down the entire address space.  This happens both as a result
+		 * of exit() and execve().  The latter case necessitates the call to
+		 * flush_tlb_mm() here.
+		 */
+		flush_tlb_mm(tlb->mm);
+	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
+			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
+	{
+		/*
+		 * If we flush more than a terabyte or across regions, we're probably
+		 * better off just flushing the entire TLB(s).  This should be very rare
+		 * and is not worth optimizing for.
+		 */
+		flush_tlb_all();
+	} else {
+		/*
+		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
+		 * vma pointer.
+		 */
+		struct vm_area_struct vma;
+
+		vma.vm_mm = tlb->mm;
+		/* flush the address range from the tlb: */
+		flush_tlb_range(&vma, start, end);
+		/* now flush the virt. page-table area mapping the address range: */
+		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
+	}
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
+	/* lastly, release the freed pages */
+	nr = tlb->nr;
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
+}
+
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that were gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+{
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
+	tlb->nr = 0;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
+	tlb->start_addr = ~0UL;
+}
+
+/*
+ * Called at the end of the shootdown operation to free up any resources that were
+ * collected.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	/*
+	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
+	 * tlb->end_addr.
+	 */
+	ia64_tlb_flush_mmu(tlb, start, end);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
+}
+
+/*
+ * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
+ * must be delayed until after the TLB has been flushed (see comments at the beginning of
+ * this file).
+ */
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	tlb->need_flush = 1;
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
+	tlb->pages[tlb->nr++] = page;
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
+}
+
+/*
+ * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
+ * PTE, not just those pointing to (normal) physical memory.
+ */
+static inline void
+__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+	if (tlb->start_addr == ~0UL)
+		tlb->start_addr = address;
+	tlb->end_addr = address + PAGE_SIZE;
+}
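+
+/*
+ * Note that only one contiguous [start_addr, end_addr) window is
+ * tracked: start_addr == ~0UL means "nothing gathered yet" (the state
+ * tlb_gather_mmu() and the flush path reset it to), and each call above
+ * simply extends the window, relying on the unmap walk proceeding in
+ * ascending address order.
+ */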
+
+#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)
+
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
+
+#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
+do {							\
+	tlb->need_flush = 1;				\
+	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
+} while (0)
+
+#define pte_free_tlb(tlb, ptep, address)		\
+do {							\
+	tlb->need_flush = 1;				\
+	__pte_free_tlb(tlb, ptep, address);		\
+} while (0)
+
+#define pmd_free_tlb(tlb, ptep, address)		\
+do {							\
+	tlb->need_flush = 1;				\
+	__pmd_free_tlb(tlb, ptep, address);		\
+} while (0)
+
+#define pud_free_tlb(tlb, pudp, address)		\
+do {							\
+	tlb->need_flush = 1;				\
+	__pud_free_tlb(tlb, pudp, address);		\
+} while (0)
+
+#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
new file mode 100644
index 0000000..3be25df
--- /dev/null
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -0,0 +1,102 @@
+#ifndef _ASM_IA64_TLBFLUSH_H
+#define _ASM_IA64_TLBFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+/*
+ * Now for some TLB flushing routines.  This is the kind of stuff that
+ * can be very expensive, so try to avoid them whenever possible.
+ */
+extern void setup_ptcg_sem(int max_purges, int from_palo);
+
+/*
+ * Flush everything (kernel mapping may also have changed due to
+ * vmalloc/vfree).
+ */
+extern void local_flush_tlb_all (void);
+
+#ifdef CONFIG_SMP
+  extern void smp_flush_tlb_all (void);
+  extern void smp_flush_tlb_mm (struct mm_struct *mm);
+  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
+# define flush_tlb_all()	smp_flush_tlb_all()
+#else
+# define flush_tlb_all()	local_flush_tlb_all()
+# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
+#endif
+
+static inline void
+local_finish_flush_tlb_mm (struct mm_struct *mm)
+{
+	if (mm == current->active_mm)
+		activate_context(mm);
+}
+
+/*
+ * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
+ * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
+ * the PTEs of the parent task.
+ */
+static inline void
+flush_tlb_mm (struct mm_struct *mm)
+{
+	if (!mm)
+		return;
+
+	set_bit(mm->context, ia64_ctx.flushmap);
+	mm->context = 0;
+
+	if (atomic_read(&mm->mm_users) == 0)
+		return;		/* happens as a result of exit_mmap() */
+
+#ifdef CONFIG_SMP
+	smp_flush_tlb_mm(mm);
+#else
+	local_finish_flush_tlb_mm(mm);
+#endif
+}
+
+extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
+/*
+ * Page-granular tlb flush.
+ */
+static inline void
+flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
+{
+#ifdef CONFIG_SMP
+	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+	if (vma->vm_mm == current->active_mm)
+		ia64_ptcl(addr, (PAGE_SHIFT << 2));
+	else
+		vma->vm_mm->context = 0;
+#endif
+}
+
+/*
+ * Flush the local TLB. Invoked from another cpu using an IPI.
+ */
+#ifdef CONFIG_SMP
+void smp_local_flush_tlb(void);
+#else
+#define smp_local_flush_tlb()
+#endif
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();	/* XXX fix me */
+}
+
+#endif /* _ASM_IA64_TLBFLUSH_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
new file mode 100644
index 0000000..3ad8f69
--- /dev/null
+++ b/arch/ia64/include/asm/topology.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2002, Erich Focht, NEC
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _ASM_IA64_TOPOLOGY_H
+#define _ASM_IA64_TOPOLOGY_H
+
+#include <asm/acpi.h>
+#include <asm/numa.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_NUMA
+
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
+
+/*
+ * Nodes within this distance are eligible for reclaim by zone_reclaim() when
+ * zone_reclaim_mode is enabled.
+ */
+#define RECLAIM_DISTANCE 15
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+#define cpumask_of_node(node) ((node) == -1 ?				\
+			       cpu_all_mask :				\
+			       &node_to_cpu_mask[node])
+
+/*
+ * Returns the number of the node containing node 'nid' (i.e., its
+ * parent node).  Not implemented here; multi-level hierarchies are
+ * detected with the help of node_distance().
+ */
+#define parent_node(nid) (nid)
+
+/*
+ * Determines the node for a given pci bus
+ */
+#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
+
+void build_cpu_to_node_map(void);
+
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
+#define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
+#endif
+
+extern void arch_fix_phys_package_id(int num, u32 slot);
+
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
new file mode 100644
index 0000000..4c351b1
--- /dev/null
+++ b/arch/ia64/include/asm/types.h
@@ -0,0 +1,31 @@
+/*
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid naming clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+
+struct fnptr {
+	unsigned long ip;
+	unsigned long gp;
+};
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_TYPES_H */
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
new file mode 100644
index 0000000..40c2027
--- /dev/null
+++ b/arch/ia64/include/asm/uaccess.h
@@ -0,0 +1,400 @@
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary.  This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses.  Thus, we need to be careful not to let the user
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible.  This code is also fairly performance sensitive,
+ * so we want to spend as little time doing safety checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros are sometimes
+ * also called from within the kernel itself, in which case the address
+ * validity check must be skipped.  The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it.  This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault.  When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT, clears r9 to 0, and
+ * resumes execution at the continuation point.
+ *
+ * Based on <asm-alpha/uaccess.h>.
+ *
+ * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
+#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+#define get_ds()  (KERNEL_DS)
+#define get_fs()  (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area really is in
+ * user-level space.  In order to do this efficiently, we make sure that the page at
+ * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
+#define __access_ok(addr, size, segment)						\
+({											\
+	__chk_user_ptr(addr);								\
+	(likely((unsigned long) (addr) <= (segment).seg)				\
+	 && ((segment).seg == KERNEL_DS.seg						\
+	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
+})
+#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
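+
+/*
+ * Typical (purely illustrative) use of the check/transfer split, with
+ * made-up variable names -- validate the area once, then use the
+ * unchecked __put_user() variant defined below for the individual
+ * stores:
+ *
+ *	if (!access_ok(VERIFY_WRITE, ubuf, 2 * sizeof(u64)))
+ *		return -EFAULT;
+ *	err  = __put_user(val0, &ubuf[0]);
+ *	err |= __put_user(val1, &ubuf[1]);
+ */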
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_unaligned_unknown (void);
+
+#define __put_user_unaligned(x, ptr)								\
+({												\
+	long __ret;										\
+	switch (sizeof(*(ptr))) {								\
+		case 1: __ret = __put_user((x), (ptr)); break;					\
+		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
+			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
+		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
+			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
+		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
+			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
+		default: __ret = __put_user_unaligned_unknown();				\
+	}											\
+	__ret;											\
+})
+
+extern long __get_user_unaligned_unknown (void);
+
+#define __get_user_unaligned(x, ptr)								\
+({												\
+	long __ret;										\
+	switch (sizeof(*(ptr))) {								\
+		case 1: __ret = __get_user((x), (ptr)); break;					\
+		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))				\
+			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
+		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))				\
+			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
+		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))				\
+			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
+		default: __ret = __get_user_unaligned_unknown();				\
+	}											\
+	__ret;											\
+})
+
+#ifdef ASM_SUPPORTED
+  struct __large_struct { unsigned long buf[100]; };
+# define __m(x) (*(struct __large_struct __user *)(x))
+
+/* We need to declare the __ex_table section before we can use it in .xdata.  */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+# define __get_user_size(val, addr, n, err)							\
+do {												\
+	register long __gu_r8 asm ("r8") = 0;							\
+	register long __gu_r9 asm ("r9");							\
+	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
+	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
+	     "[1:]"										\
+	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
+	(err) = __gu_r8;									\
+	(val) = __gu_r9;									\
+} while (0)
+
+/*
+ * The "__put_user_size()" macro tells gcc that it reads from memory rather than
+ * writing to it.  This is safe because the store only touches user memory that
+ * gcc knows nothing about, so there are no aliasing issues.
+ */
+# define __put_user_size(val, addr, n, err)							\
+do {												\
+	register long __pu_r8 asm ("r8") = 0;							\
+	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
+		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
+		      "[1:]"									\
+		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
+	(err) = __pu_r8;									\
+} while (0)
+
+#else /* !ASM_SUPPORTED */
+# define RELOC_TYPE	2	/* ip-rel */
+# define __get_user_size(val, addr, n, err)				\
+do {									\
+	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
+	(err) = ia64_getreg(_IA64_REG_R8);				\
+	(val) = ia64_getreg(_IA64_REG_R9);				\
+} while (0)
+# define __put_user_size(val, addr, n, err)				\
+do {									\
+	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,	\
+		  (__force unsigned long) (val));			\
+	(err) = ia64_getreg(_IA64_REG_R8);				\
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+extern void __get_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
+ * using r8/r9.
+ */
+#define __do_get_user(check, x, ptr, size, segment)					\
+({											\
+	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
+	__typeof__ (size) __gu_size = (size);						\
+	long __gu_err = -EFAULT;							\
+	unsigned long __gu_val = 0;							\
+	if (!check || __access_ok(__gu_ptr, __gu_size, segment))			\
+		switch (__gu_size) {							\
+		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
+		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
+		      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
+		      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
+		      default: __get_user_unknown(); break;				\
+		}									\
+	(x) = (__force __typeof__(*(__gu_ptr))) __gu_val;				\
+	__gu_err;									\
+})
+
+#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
+#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
+
+extern void __put_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
+ */
+#define __do_put_user(check, x, ptr, size, segment)					\
+({											\
+	__typeof__ (x) __pu_x = (x);							\
+	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
+	__typeof__ (size) __pu_size = (size);						\
+	long __pu_err = -EFAULT;							\
+											\
+	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
+		switch (__pu_size) {							\
+		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
+		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
+		      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
+		      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
+		      default: __put_user_unknown(); break;				\
+		}									\
+	__pu_err;									\
+})
+
+#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
+#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
+					       unsigned long count);
+
+static inline unsigned long
+__copy_to_user (void __user *to, const void *from, unsigned long count)
+{
+	return __copy_user(to, (__force void __user *) from, count);
+}
+
+static inline unsigned long
+__copy_from_user (void *to, const void __user *from, unsigned long count)
+{
+	return __copy_user((__force void __user *) to, from, count);
+}
+
+#define __copy_to_user_inatomic		__copy_to_user
+#define __copy_from_user_inatomic	__copy_from_user
+#define copy_to_user(to, from, n)							\
+({											\
+	void __user *__cu_to = (to);							\
+	const void *__cu_from = (from);							\
+	long __cu_len = (n);								\
+											\
+	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	__cu_len;									\
+})
+
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (likely(__access_ok(from, n, get_fs())))
+		n = __copy_user((__force void __user *) to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
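+
+/*
+ * Note the asymmetry with copy_to_user() above: when the access check
+ * fails outright, the kernel-side destination buffer is zeroed instead
+ * of being left with stale contents, so callers that fail to check the
+ * return value don't end up operating on uninitialized data.
+ */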
+
+#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
+
+static inline unsigned long
+copy_in_user (void __user *to, const void __user *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_user(to, from, n);
+	return n;
+}
+
+extern unsigned long __do_clear_user (void __user *, unsigned long);
+
+#define __clear_user(to, n)		__do_clear_user(to, n)
+
+#define clear_user(to, n)					\
+({								\
+	unsigned long __cu_len = (n);				\
+	if (__access_ok(to, __cu_len, get_fs()))		\
+		__cu_len = __do_clear_user(to, __cu_len);	\
+	__cu_len;						\
+})
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
+ * strlen.
+ */
+extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
+
+#define strncpy_from_user(to, from, n)					\
+({									\
+	const char __user * __sfu_from = (from);			\
+	long __sfu_ret = -EFAULT;					\
+	if (__access_ok(__sfu_from, 0, get_fs()))			\
+		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
+	__sfu_ret;							\
+})
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern unsigned long __strlen_user (const char __user *);
+
+#define strlen_user(str)				\
+({							\
+	const char __user *__su_str = (str);		\
+	unsigned long __su_ret = 0;			\
+	if (__access_ok(__su_str, 0, get_fs()))		\
+		__su_ret = __strlen_user(__su_str);	\
+	__su_ret;					\
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char __user *, long);
+
+#define strnlen_user(str, len)					\
+({								\
+	const char __user *__su_str = (str);			\
+	unsigned long __su_ret = 0;				\
+	if (__access_ok(__su_str, 0, get_fs()))			\
+		__su_ret = __strnlen_user(__su_str, len);	\
+	__su_ret;						\
+})
+
+/* Generic code can't deal with the location-relative format that we use for compactness.  */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+struct exception_table_entry {
+	int addr;	/* location-relative address of insn this fixup is for */
+	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
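+
+/*
+ * Both fields are self-relative, i.e. offsets from the field's own
+ * address, which keeps the entries position-independent and at 8 bytes
+ * rather than 16 on a 64-bit kernel; ia64_handle_exception() turns them
+ * back into absolute addresses.
+ */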
+
+extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+static inline int
+ia64_done_with_exception (struct pt_regs *regs)
+{
+	const struct exception_table_entry *e;
+	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+	if (e) {
+		ia64_handle_exception(regs, e);
+		return 1;
+	}
+	return 0;
+}
+
+#define ARCH_HAS_TRANSLATE_MEM_PTR	1
+static __inline__ void *
+xlate_dev_mem_ptr(phys_addr_t p)
+{
+	struct page *page;
+	void *ptr;
+
+	page = pfn_to_page(p >> PAGE_SHIFT);
+	if (PageUncached(page))
+		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
+	else
+		ptr = __va(p);
+
+	return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ void *
+xlate_dev_kmem_ptr(void *p)
+{
+	struct page *page;
+	void *ptr;
+
+	page = virt_to_page((unsigned long)p);
+	if (PageUncached(page))
+		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
+	else
+		ptr = p;
+
+	return ptr;
+}
+
+#endif /* _ASM_IA64_UACCESS_H */
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
new file mode 100644
index 0000000..7bddc7f
--- /dev/null
+++ b/arch/ia64/include/asm/unaligned.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_UNALIGNED_H
+#define _ASM_IA64_UNALIGNED_H
+
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+
+#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/ia64/include/asm/uncached.h b/arch/ia64/include/asm/uncached.h
new file mode 100644
index 0000000..13d7e65
--- /dev/null
+++ b/arch/ia64/include/asm/uncached.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Prototypes for the uncached page allocator
+ */
+
+extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
+extern void uncached_free_page(unsigned long uc_addr, int n_pages);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
new file mode 100644
index 0000000..74c132d
--- /dev/null
+++ b/arch/ia64/include/asm/unistd.h
@@ -0,0 +1,50 @@
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_UNISTD_H
+#define _ASM_IA64_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls			323 /* length of syscall table */
+
+/*
+ * The following defines stop scripts/checksyscalls.sh from complaining about
+ * unimplemented system calls.  Glibc provides for each of these by using
+ * more modern equivalent system calls.
+ */
+#define __IGNORE_fork		/* clone() */
+#define __IGNORE_time		/* gettimeofday() */
+#define __IGNORE_alarm		/* setitimer(ITIMER_REAL, ... */
+#define __IGNORE_pause		/* rt_sigprocmask(), rt_sigsuspend() */
+#define __IGNORE_utime		/* utimes() */
+#define __IGNORE_getpgrp	/* getpgid() */
+#define __IGNORE_vfork		/* clone() */
+#define __IGNORE_umount2	/* umount() */
+
+#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
+
+asmlinkage unsigned long sys_mmap(
+				unsigned long addr, unsigned long len,
+				int prot, int flags,
+				int fd, long off);
+asmlinkage unsigned long sys_mmap2(
+				unsigned long addr, unsigned long len,
+				int prot, int flags,
+				int fd, long pgoff);
+struct pt_regs;
+asmlinkage long sys_ia64_pipe(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/include/asm/unwind.h b/arch/ia64/include/asm/unwind.h
new file mode 100644
index 0000000..1af3875
--- /dev/null
+++ b/arch/ia64/include/asm/unwind.h
@@ -0,0 +1,233 @@
+#ifndef _ASM_IA64_UNWIND_H
+#define _ASM_IA64_UNWIND_H
+
+/*
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * A simple API for unwinding kernel stacks.  This is used for
+ * debugging and error reporting purposes.  The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full IA-64 unwind API (though
+ * it would of course be possible to implement the kernel API on top
+ * of it).
+ */
+
+struct task_struct;	/* forward declaration */
+struct switch_stack;	/* forward declaration */
+
+enum unw_application_register {
+	UNW_AR_BSP,
+	UNW_AR_BSPSTORE,
+	UNW_AR_PFS,
+	UNW_AR_RNAT,
+	UNW_AR_UNAT,
+	UNW_AR_LC,
+	UNW_AR_EC,
+	UNW_AR_FPSR,
+	UNW_AR_RSC,
+	UNW_AR_CCV,
+	UNW_AR_CSD,
+	UNW_AR_SSD
+};
+
+/*
+ * The following declarations are private to the unwind
+ * implementation:
+ */
+
+struct unw_stack {
+	unsigned long limit;
+	unsigned long top;
+};
+
+#define UNW_FLAG_INTERRUPT_FRAME	(1UL << 0)
+
+/*
+ * No user of this module should ever access this structure directly
+ * as it is subject to change.  It is declared here solely so we can
+ * use automatic variables.
+ */
+struct unw_frame_info {
+	struct unw_stack regstk;
+	struct unw_stack memstk;
+	unsigned int flags;
+	short hint;
+	short prev_script;
+
+	/* current frame info: */
+	unsigned long bsp;		/* backing store pointer value */
+	unsigned long sp;		/* stack pointer value */
+	unsigned long psp;		/* previous sp value */
+	unsigned long ip;		/* instruction pointer value */
+	unsigned long pr;		/* current predicate values */
+	unsigned long *cfm_loc;		/* cfm save location (or NULL) */
+	unsigned long pt;		/* struct pt_regs location */
+
+	struct task_struct *task;
+	struct switch_stack *sw;
+
+	/* preserved state: */
+	unsigned long *bsp_loc;		/* previous bsp save location */
+	unsigned long *bspstore_loc;
+	unsigned long *pfs_loc;
+	unsigned long *rnat_loc;
+	unsigned long *rp_loc;
+	unsigned long *pri_unat_loc;
+	unsigned long *unat_loc;
+	unsigned long *pr_loc;
+	unsigned long *lc_loc;
+	unsigned long *fpsr_loc;
+	struct unw_ireg {
+		unsigned long *loc;
+		struct unw_ireg_nat {
+			unsigned long type : 3;		/* enum unw_nat_type */
+			signed long off : 61;		/* NaT word is at loc+nat.off */
+		} nat;
+	} r4, r5, r6, r7;
+	unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
+	struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
+};
+
+/*
+ * The official API follows below:
+ */
+
+struct unw_table_entry {
+	u64 start_offset;
+	u64 end_offset;
+	u64 info_offset;
+};
+
+/*
+ * Initialize unwind support.
+ */
+extern void unw_init (void);
+
+extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+				   const void *table_start, const void *table_end);
+
+extern void unw_remove_unwind_table (void *handle);
+
+/*
+ * Prepare to unwind blocked task t.
+ */
+extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
+
+extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+				 struct switch_stack *sw);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
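+
+/*
+ * A minimal, purely illustrative stack walk using this API (the
+ * callback name and the printout are made up; the calls are the ones
+ * declared in this header):
+ *
+ *	static void dump_cb (struct unw_frame_info *info, void *arg)
+ *	{
+ *		unsigned long ip;
+ *
+ *		do {
+ *			unw_get_ip(info, &ip);
+ *			if (!ip)
+ *				break;
+ *			printk("ip=%016lx\n", ip);
+ *		} while (unw_unwind(info) >= 0);
+ *	}
+ *	...
+ *	unw_init_running(dump_cb, NULL);
+ */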
+
+/*
+ * Unwind to the previous frame.  Returns 0 if successful, a negative
+ * number in case of an error.
+ */
+extern int unw_unwind (struct unw_frame_info *info);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs).  Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unw_unwind_to_user (struct unw_frame_info *info);
+
+#define unw_is_intr_frame(info)	(((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
+
+static inline int
+unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
+{
+	*valp = (info)->ip;
+	return 0;
+}
+
+static inline int
+unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
+{
+	*valp = (info)->sp;
+	return 0;
+}
+
+static inline int
+unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
+{
+	*valp = (info)->psp;
+	return 0;
+}
+
+static inline int
+unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
+{
+	*valp = (info)->bsp;
+	return 0;
+}
+
+static inline int
+unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
+{
+	*valp = *(info)->cfm_loc;
+	return 0;
+}
+
+static inline int
+unw_set_cfm (struct unw_frame_info *info, unsigned long val)
+{
+	*(info)->cfm_loc = val;
+	return 0;
+}
+
+static inline int
+unw_get_rp (struct unw_frame_info *info, unsigned long *val)
+{
+	if (!info->rp_loc)
+		return -1;
+	*val = *info->rp_loc;
+	return 0;
+}
+
+extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
+extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
+extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
+
+static inline int
+unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
+{
+	return unw_access_gr(i, n, &v, &nat, 1);
+}
+
+static inline int
+unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
+{
+	return unw_access_br(i, n, &v, 1);
+}
+
+static inline int
+unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
+{
+	return unw_access_fr(i, n, &v, 1);
+}
+
+static inline int
+unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
+{
+	return unw_access_ar(i, n, &v, 1);
+}
+
+static inline int
+unw_set_pr (struct unw_frame_info *i, unsigned long v)
+{
+	return unw_access_pr(i, &v, 1);
+}
+
+#define unw_get_gr(i,n,v,nat)	unw_access_gr(i,n,v,nat,0)
+#define unw_get_br(i,n,v)	unw_access_br(i,n,v,0)
+#define unw_get_fr(i,n,v)	unw_access_fr(i,n,v,0)
+#define unw_get_ar(i,n,v)	unw_access_ar(i,n,v,0)
+#define unw_get_pr(i,v)		unw_access_pr(i,v,0)
+
+#endif /* _ASM_UNWIND_H */
diff --git a/arch/ia64/include/asm/user.h b/arch/ia64/include/asm/user.h
new file mode 100644
index 0000000..8b98211
--- /dev/null
+++ b/arch/ia64/include/asm/user.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_IA64_USER_H
+#define _ASM_IA64_USER_H
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd).  The file contents are as
+ * follows:
+ *
+ *  upage: 1 page consisting of a user struct that tells gdb
+ *	what is present in the file.  Directly after this is a
+ *	copy of the task_struct, which is currently not used by gdb,
+ *	but it may come in handy at some point.  All of the registers
+ *	are stored as part of the upage.  The upage should always be
+ *	only one page long.
+ *  data: The data segment follows next.  We use current->end_text to
+ *	current->brk to pick up all of the user variables, plus any memory
+ *	that may have been sbrk'ed.  No attempt is made to determine if a
+ *	page is demand-zero or if a page is totally unused, we just cover
+ *	the entire range.  All of the addresses are rounded in such a way
+ *	that an integral number of pages is written.
+ *  stack: We need the stack information in order to get a meaningful
+ *	backtrace.  We need to write the data from usp to
+ *	current->start_stack, so we round each of these in order to be able
+ *	to write an integer number of pages.
+ *
+ * Modified 1998, 1999, 2001
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+
+#define EF_SIZE		3072	/* XXX fix me */
+
+struct user {
+	unsigned long	regs[EF_SIZE/8+32];	/* integer and fp regs */
+	size_t		u_tsize;		/* text size (pages) */
+	size_t		u_dsize;		/* data size (pages) */
+	size_t		u_ssize;		/* stack size (pages) */
+	unsigned long	start_code;		/* text starting address */
+	unsigned long	start_data;		/* data starting address */
+	unsigned long	start_stack;		/* stack starting address */
+	long int	signal;			/* signal causing core dump */
+	unsigned long	u_ar0;			/* help gdb find registers */
+	unsigned long	magic;			/* identifies a core file */
+	char		u_comm[32];		/* user command name */
+};
+
+#define NBPG			PAGE_SIZE
+#define UPAGES			1
+#define HOST_TEXT_START_ADDR	(u.start_code)
+#define HOST_DATA_START_ADDR	(u.start_data)
+#define HOST_STACK_END_ADDR	(u.start_stack + u.u_ssize * NBPG)
+
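+/*
+ * Illustrative sketch (editor's addition): given a filled-in 'u', the
+ * macros above bound the regions that end up in the core file.  The
+ * global 'u' is the convention of the legacy a.out core code; treat
+ * these surrounding names as assumptions.
+ */
+#if 0	/* example only */
+	unsigned long text  = HOST_TEXT_START_ADDR;	/* u.start_code */
+	unsigned long data  = HOST_DATA_START_ADDR;	/* u.start_data */
+	unsigned long stack = HOST_STACK_END_ADDR;	/* start_stack + u_ssize pages */
+#endif
+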
+#endif /* _ASM_IA64_USER_H */
diff --git a/arch/ia64/include/asm/ustack.h b/arch/ia64/include/asm/ustack.h
new file mode 100644
index 0000000..b275401
--- /dev/null
+++ b/arch/ia64/include/asm/ustack.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_USTACK_H
+#define _ASM_IA64_USTACK_H
+
+#include <asm/page.h>
+#include <uapi/asm/ustack.h>
+
+/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
+#define MAX_USER_STACK_SIZE	(RGN_MAP_LIMIT/2)
+#define STACK_TOP		(0x6000000000000000UL + RGN_MAP_LIMIT)
+#define STACK_TOP_MAX		STACK_TOP
+#endif /* _ASM_IA64_USTACK_H */
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644
index 0000000..8f6cbaa
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+	/* temporary support for running on hardware simulator */
+	return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif	/* _ASM_IA64_UV_UV_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
new file mode 100644
index 0000000..2a88c72
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -0,0 +1,315 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV architectural definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_UV_HUB_H__
+#define __ASM_IA64_UV_HUB_H__
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+
+
+/*
+ * Addressing Terminology
+ *
+ *	M       - The low M bits of a physical address represent the offset
+ *		  into the blade local memory. RAM memory on a blade is physically
+ *		  contiguous (although various IO spaces may punch holes in
+ *		  it).
+ *
+ * 	N	- Number of bits in the node portion of a socket physical
+ * 		  address.
+ *
+ * 	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * 	 	  routers always have low bit of 1, C/MBricks have low bit
+ * 		  equal to 0. Most addressing macros that target UV hub chips
+ * 		  right shift the NASID by 1 to exclude the always-zero bit.
+ * 		  NASIDs contain up to 15 bits.
+ *
+ *	GNODE   - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ *		  of nasids.
+ *
+ * 	PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
+ * 		  of the nasid for socket usage.
+ *
+ *
+ *  NumaLink Global Physical Address Format:
+ *  +--------------------------------+---------------------+
+ *  |00..000|      GNODE             |      NodeOffset     |
+ *  +--------------------------------+---------------------+
+ *          |<-------53 - M bits --->|<--------M bits ----->
+ *
+ *	M - number of node offset bits (35 .. 40)
+ *
+ *
+ *  Memory/UV-HUB Processor Socket Address Format:
+ *  +----------------+---------------+---------------------+
+ *  |00..000000000000|   PNODE       |      NodeOffset     |
+ *  +----------------+---------------+---------------------+
+ *                   <--- N bits --->|<--------M bits ----->
+ *
+ *	M - number of node offset bits (35 .. 40)
+ *	N - number of PNODE bits (0 .. 10)
+ *
+ *		Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
+ *		The actual values are configuration dependent and are set at
+ *		boot time. M & N values are set by the hardware/BIOS at boot.
+ */
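+
+/*
+ * Worked example (editor's addition, purely illustrative values): with
+ * M = 36 and N = 10, a C-brick with NASID 6 has GNODE 6 >> 1 = 3 and
+ * PNODE 3 (the low N bits of the GNODE).  Socket offset 0x1000 on that
+ * node then has the global physical address:
+ *
+ *	gpa = (3UL << 36) | 0x1000;
+ *
+ * The real M and N values are configuration dependent, as noted above.
+ */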
+
+
+/*
+ * Maximum number of bricks in all partitions and in all coherency domains.
+ * This is the total number of bricks accessible in the numalink fabric. It
+ * includes all C & M bricks. Routers are NOT included.
+ *
+ * This value is also the value of the maximum number of non-router NASIDs
+ * in the numalink fabric.
+ *
+ * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
+ */
+#define UV_MAX_NUMALINK_BLADES	16384
+
+/*
+ * Maximum number of C/Mbricks within a software SSI (hardware may support
+ * more).
+ */
+#define UV_MAX_SSI_BLADES	1
+
+/*
+ * The largest possible NASID of a C or M brick (+ 2)
+ */
+#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced and are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct uv_hub_info_s {
+	unsigned long	global_mmr_base;
+	unsigned long	gpa_mask;
+	unsigned long	gnode_upper;
+	unsigned long	lowmem_remap_top;
+	unsigned long	lowmem_remap_base;
+	unsigned short	pnode;
+	unsigned short	pnode_mask;
+	unsigned short	coherency_domain_number;
+	unsigned short	numa_blade_id;
+	unsigned char	blade_processor_id;
+	unsigned char	m_val;
+	unsigned char	n_val;
+};
+DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+#define uv_hub_info 		this_cpu_ptr(&__uv_hub_info)
+#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
+
+/*
+ * Local & Global MMR space macros.
+ * 	Note: macros are intended to be used ONLY by inline functions
+ * 	in this file - not by other kernel code.
+ * 		n -  NASID (full 15-bit global nasid)
+ * 		g -  GNODE (full 15-bit global nasid, right shifted 1)
+ * 		p -  PNODE (local part of nasids, right shifted 1)
+ */
+#define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_PNODE_TO_NASID(p)		(((p) << 1) | uv_hub_info->gnode_upper)
+
+#define UV_LOCAL_MMR_BASE		0xf4000000UL
+#define UV_GLOBAL_MMR32_BASE		0xf8000000UL
+#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)
+
+#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
+#define UV_GLOBAL_MMR64_PNODE_SHIFT	26
+
+#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
+
+#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
+	((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+/*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+ * 	Note: use the standard __pa() & __va() macros for converting
+ * 	      between socket virtual and socket physical addresses.
+ */
+
+/* socket phys RAM --> UV global physical address */
+static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
+{
+	if (paddr < uv_hub_info->lowmem_remap_top)
+		paddr += uv_hub_info->lowmem_remap_base;
+	return paddr | uv_hub_info->gnode_upper;
+}
+
+
+/* socket virtual --> UV global physical address */
+static inline unsigned long uv_gpa(void *v)
+{
+	return __pa(v) | uv_hub_info->gnode_upper;
+}
+
+/* socket virtual --> UV global physical address, cast to a pointer */
+static inline void *uv_vgpa(void *v)
+{
+	return (void *)uv_gpa(v);
+}
+
+/* UV global physical address --> socket virtual */
+static inline void *uv_va(unsigned long gpa)
+{
+	return __va(gpa & uv_hub_info->gpa_mask);
+}
+
+/* pnode, offset --> socket virtual */
+static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
+{
+	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+}
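+
+/*
+ * Illustrative round trip (editor's addition): converting a socket
+ * virtual address to a UV global physical address and back with the
+ * helpers above.  'obj' is a placeholder for any directly mapped
+ * kernel object.
+ */
+#if 0	/* example only */
+	unsigned long gpa = uv_gpa(&obj);	/* socket virtual -> global */
+	void *p = uv_va(gpa);			/* global -> socket virtual */
+	/* p == &obj when the address maps back to this node's RAM */
+#endif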
+
+
+/*
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+static inline unsigned long *uv_global_mmr32_address(int pnode,
+				unsigned long offset)
+{
+	return __va(UV_GLOBAL_MMR32_BASE |
+		       UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+				 unsigned long val)
+{
+	*uv_global_mmr32_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr32(int pnode,
+						 unsigned long offset)
+{
+	return *uv_global_mmr32_address(pnode, offset);
+}
+
+/*
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+static inline unsigned long *uv_global_mmr64_address(int pnode,
+				unsigned long offset)
+{
+	return __va(UV_GLOBAL_MMR64_BASE |
+		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+				unsigned long val)
+{
+	*uv_global_mmr64_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr64(int pnode,
+						 unsigned long offset)
+{
+	return *uv_global_mmr64_address(pnode, offset);
+}
+
+/*
+ * Access hub local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+	return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+	return *uv_local_mmr_address(offset);
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+	*uv_local_mmr_address(offset) = val;
+}
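+
+/*
+ * Illustrative read-modify-write (editor's addition): setting the enable
+ * bit of a hub local MMR.  UVH_EXAMPLE_MMR is a hypothetical offset;
+ * real offsets (and UV_MMR_ENABLE) live in <asm/uv/uv_mmrs.h>.
+ */
+#if 0	/* example only */
+	unsigned long v = uv_read_local_mmr(UVH_EXAMPLE_MMR);
+
+	uv_write_local_mmr(UVH_EXAMPLE_MMR, v | UV_MMR_ENABLE);
+#endif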
+
+/*
+ * Structures and definitions for converting between cpu, node, pnode, and blade
+ * numbers.
+ */
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+	return smp_processor_id();
+}
+
+/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
+static inline int uv_numa_blade_id(void)
+{
+	return 0;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+	return 0;
+}
+
+/* Convert linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+	return 0;
+}
+
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
+{
+	return 0;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+	return num_possible_cpus();
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+	return num_online_cpus();
+}
+
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
+{
+	return 0;
+}
+
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
+{
+	return 0;
+}
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+	return 1;
+}
+
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+	/* not currently needed on ia64 */
+}
+
+
+#endif /* __ASM_IA64_UV_HUB_H__ */
+
diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
new file mode 100644
index 0000000..fe0b8f0
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -0,0 +1,825 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV MMR definitions
+ *
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H
+
+#define UV_MMR_ENABLE		(1UL << 63)
+
+/* ========================================================================= */
+/*                           UVH_BAU_DATA_CONFIG                             */
+/* ========================================================================= */
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x0438
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_bau_data_config_u {
+    unsigned long	v;
+    struct uvh_bau_data_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
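+
+/*
+ * Illustrative decode (editor's addition): every register in this file
+ * follows the same pattern - load the raw 64-bit value into .v, then
+ * read fields through the bitfield view or with the SHFT/MASK constants.
+ */
+#if 0	/* example only */
+	union uvh_bau_data_config_u cfg;
+	unsigned long vector;
+
+	cfg.v = uv_read_local_mmr(UVH_BAU_DATA_CONFIG);	/* see uv_hub.h */
+	vector = cfg.s.vector_;
+	/* equivalently, without bitfields: */
+	vector = (cfg.v & UVH_BAU_DATA_CONFIG_VECTOR_MASK)
+			>> UVH_BAU_DATA_CONFIG_VECTOR_SHFT;
+#endif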
+
+/* ========================================================================= */
+/*                           UVH_EVENT_OCCURRED0                             */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x005e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+union uvh_event_occurred0_u {
+    unsigned long	v;
+    struct uvh_event_occurred0_s {
+	unsigned long	lb_hcerr             :  1;  /* RW, W1C */
+	unsigned long	gr0_hcerr            :  1;  /* RW, W1C */
+	unsigned long	gr1_hcerr            :  1;  /* RW, W1C */
+	unsigned long	lh_hcerr             :  1;  /* RW, W1C */
+	unsigned long	rh_hcerr             :  1;  /* RW, W1C */
+	unsigned long	xn_hcerr             :  1;  /* RW, W1C */
+	unsigned long	si_hcerr             :  1;  /* RW, W1C */
+	unsigned long	lb_aoerr0            :  1;  /* RW, W1C */
+	unsigned long	gr0_aoerr0           :  1;  /* RW, W1C */
+	unsigned long	gr1_aoerr0           :  1;  /* RW, W1C */
+	unsigned long	lh_aoerr0            :  1;  /* RW, W1C */
+	unsigned long	rh_aoerr0            :  1;  /* RW, W1C */
+	unsigned long	xn_aoerr0            :  1;  /* RW, W1C */
+	unsigned long	si_aoerr0            :  1;  /* RW, W1C */
+	unsigned long	lb_aoerr1            :  1;  /* RW, W1C */
+	unsigned long	gr0_aoerr1           :  1;  /* RW, W1C */
+	unsigned long	gr1_aoerr1           :  1;  /* RW, W1C */
+	unsigned long	lh_aoerr1            :  1;  /* RW, W1C */
+	unsigned long	rh_aoerr1            :  1;  /* RW, W1C */
+	unsigned long	xn_aoerr1            :  1;  /* RW, W1C */
+	unsigned long	si_aoerr1            :  1;  /* RW, W1C */
+	unsigned long	rh_vpi_int           :  1;  /* RW, W1C */
+	unsigned long	system_shutdown_int  :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_0         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_1         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_2         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_3         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_4         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_5         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_6         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_7         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_8         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_9         :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_10        :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_11        :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_12        :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_13        :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_14        :  1;  /* RW, W1C */
+	unsigned long	lb_irq_int_15        :  1;  /* RW, W1C */
+	unsigned long	l1_nmi_int           :  1;  /* RW, W1C */
+	unsigned long	stop_clock           :  1;  /* RW, W1C */
+	unsigned long	asic_to_l1           :  1;  /* RW, W1C */
+	unsigned long	l1_to_asic           :  1;  /* RW, W1C */
+	unsigned long	ltc_int              :  1;  /* RW, W1C */
+	unsigned long	la_seq_trigger       :  1;  /* RW, W1C */
+	unsigned long	ipi_int              :  1;  /* RW, W1C */
+	unsigned long	extio_int0           :  1;  /* RW, W1C */
+	unsigned long	extio_int1           :  1;  /* RW, W1C */
+	unsigned long	extio_int2           :  1;  /* RW, W1C */
+	unsigned long	extio_int3           :  1;  /* RW, W1C */
+	unsigned long	profile_int          :  1;  /* RW, W1C */
+	unsigned long	rtc0                 :  1;  /* RW, W1C */
+	unsigned long	rtc1                 :  1;  /* RW, W1C */
+	unsigned long	rtc2                 :  1;  /* RW, W1C */
+	unsigned long	rtc3                 :  1;  /* RW, W1C */
+	unsigned long	bau_data             :  1;  /* RW, W1C */
+	unsigned long	power_management_req :  1;  /* RW, W1C */
+	unsigned long	rsvd_57_63           :  7;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                        UVH_EVENT_OCCURRED0_ALIAS                          */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_INT_CMPB                                */
+/* ========================================================================= */
+#define UVH_INT_CMPB 0x22080UL
+
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpb_u {
+    unsigned long	v;
+    struct uvh_int_cmpb_s {
+	unsigned long	real_time_cmpb : 56;  /* RW */
+	unsigned long	rsvd_56_63     :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_INT_CMPC                                */
+/* ========================================================================= */
+#define UVH_INT_CMPC 0x22100UL
+
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpc_u {
+    unsigned long	v;
+    struct uvh_int_cmpc_s {
+	unsigned long	real_time_cmpc : 56;  /* RW */
+	unsigned long	rsvd_56_63     :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_INT_CMPD                                */
+/* ========================================================================= */
+#define UVH_INT_CMPD 0x22180UL
+
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpd_u {
+    unsigned long	v;
+    struct uvh_int_cmpd_s {
+	unsigned long	real_time_cmpd : 56;  /* RW */
+	unsigned long	rsvd_56_63     :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_NODE_ID                                 */
+/* ========================================================================= */
+#define UVH_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UVH_NODE_ID_NI_PORT_SHFT 56
+#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+union uvh_node_id_u {
+    unsigned long	v;
+    struct uvh_node_id_s {
+	unsigned long	force1        :  1;  /* RO */
+	unsigned long	manufacturer  : 11;  /* RO */
+	unsigned long	part_number   : 16;  /* RO */
+	unsigned long	revision      :  4;  /* RO */
+	unsigned long	node_id       : 15;  /* RW */
+	unsigned long	rsvd_47       :  1;  /*    */
+	unsigned long	nodes_per_bit :  7;  /* RW */
+	unsigned long	rsvd_55       :  1;  /*    */
+	unsigned long	ni_port       :  4;  /* RO */
+	unsigned long	rsvd_60_63    :  4;  /*    */
+    } s;
+};
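+
+/*
+ * Illustrative use (editor's addition): reading the node identity, using
+ * the same union decode pattern shown earlier in this file.
+ */
+#if 0	/* example only */
+	union uvh_node_id_u node_id;
+
+	node_id.v = uv_read_local_mmr(UVH_NODE_ID);	/* see uv_hub.h */
+	printk("nasid %d\n", (int)node_id.s.node_id);
+#endif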
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+	unsigned long	rsvd_0_23 : 24;  /*    */
+	unsigned long	dest_base : 22;  /* RW */
+	unsigned long	rsvd_46_63: 18;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+	unsigned long	rsvd_0_23 : 24;  /*    */
+	unsigned long	dest_base : 22;  /* RW */
+	unsigned long	rsvd_46_63: 18;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+	unsigned long	rsvd_0_23 : 24;  /*    */
+	unsigned long	dest_base : 22;  /* RW */
+	unsigned long	rsvd_46_63: 18;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
+/* ========================================================================= */
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_gru_overlay_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_gru_overlay_config_mmr_s {
+	unsigned long	rsvd_0_27: 28;  /*    */
+	unsigned long	base   : 18;  /* RW */
+	unsigned long	rsvd_46_47:  2;  /*    */
+	unsigned long	gr4    :  1;  /* RW */
+	unsigned long	rsvd_49_51:  3;  /*    */
+	unsigned long	n_gru  :  4;  /* RW */
+	unsigned long	rsvd_56_62:  7;  /*    */
+	unsigned long	enable :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                    UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR                      */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+	unsigned long	rsvd_0_25: 26;  /*    */
+	unsigned long	base     : 20;  /* RW */
+	unsigned long	dual_hub :  1;  /* RW */
+	unsigned long	rsvd_47_62: 16;  /*    */
+	unsigned long	enable   :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                                 UVH_RTC                                   */
+/* ========================================================================= */
+#define UVH_RTC 0x340000UL
+
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+
+union uvh_rtc_u {
+    unsigned long	v;
+    struct uvh_rtc_s {
+	unsigned long	real_time_clock : 56;  /* RW */
+	unsigned long	rsvd_56_63      :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                           UVH_RTC1_INT_CONFIG                             */
+/* ========================================================================= */
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc1_int_config_u {
+    unsigned long	v;
+    struct uvh_rtc1_int_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                           UVH_RTC2_INT_CONFIG                             */
+/* ========================================================================= */
+#define UVH_RTC2_INT_CONFIG 0x61600UL
+
+#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC2_INT_CONFIG_P_SHFT 13
+#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC2_INT_CONFIG_T_SHFT 15
+#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC2_INT_CONFIG_M_SHFT 16
+#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc2_int_config_u {
+    unsigned long	v;
+    struct uvh_rtc2_int_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                           UVH_RTC3_INT_CONFIG                             */
+/* ========================================================================= */
+#define UVH_RTC3_INT_CONFIG 0x61640UL
+
+#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC3_INT_CONFIG_P_SHFT 13
+#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC3_INT_CONFIG_T_SHFT 15
+#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC3_INT_CONFIG_M_SHFT 16
+#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc3_int_config_u {
+    unsigned long	v;
+    struct uvh_rtc3_int_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                            UVH_RTC_INC_RATIO                              */
+/* ========================================================================= */
+#define UVH_RTC_INC_RATIO 0x350000UL
+
+#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
+#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
+#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
+#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
+
+union uvh_rtc_inc_ratio_u {
+    unsigned long	v;
+    struct uvh_rtc_inc_ratio_s {
+	unsigned long	fraction : 20;  /* RW */
+	unsigned long	ratio    :  3;  /* RW */
+	unsigned long	rsvd_23_63: 41;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                          UVH_SI_ADDR_MAP_CONFIG                           */
+/* ========================================================================= */
+#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
+
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
+
+union uvh_si_addr_map_config_u {
+    unsigned long	v;
+    struct uvh_si_addr_map_config_s {
+	unsigned long	m_skt :  6;  /* RW */
+	unsigned long	rsvd_6_7:  2;  /*    */
+	unsigned long	n_skt :  4;  /* RW */
+	unsigned long	rsvd_12_63: 52;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS0_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
+
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias0_overlay_config_u {
+    unsigned long	v;
+    struct uvh_si_alias0_overlay_config_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS1_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
+
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias1_overlay_config_u {
+    unsigned long	v;
+    struct uvh_si_alias1_overlay_config_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS2_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
+
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias2_overlay_config_u {
+    unsigned long	v;
+    struct uvh_si_alias2_overlay_config_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
diff --git a/arch/ia64/include/asm/vga.h b/arch/ia64/include/asm/vga.h
new file mode 100644
index 0000000..02184ec
--- /dev/null
+++ b/arch/ia64/include/asm/vga.h
@@ -0,0 +1,25 @@
+/*
+ *	Access to VGA videoram
+ *
+ *	(c) 1998 Martin Mares <mj@ucw.cz>
+ *	(c) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ *	(c) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef __ASM_IA64_VGA_H_
+#define __ASM_IA64_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then access the
+ * videoram directly without any black magic.
+ */
+
+extern unsigned long vga_console_iobase;
+extern unsigned long vga_console_membase;
+
+#define VGA_MAP_MEM(x,s)	((unsigned long) ioremap_nocache(vga_console_membase + (x), s))
+
+#define vga_readb(x)	(*(x))
+#define vga_writeb(x,y)	(*(y) = (x))
+
+#endif /* __ASM_IA64_VGA_H_ */
diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h
new file mode 100644
index 0000000..a349e23
--- /dev/null
+++ b/arch/ia64/include/asm/xor.h
@@ -0,0 +1,31 @@
+/*
+ * Optimized RAID-5 checksumming functions for IA-64.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
+		       unsigned long *);
+extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
+		       unsigned long *, unsigned long *);
+extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
+		       unsigned long *, unsigned long *, unsigned long *);
+
+static struct xor_block_template xor_block_ia64 = {
+	.name =	"ia64",
+	.do_2 =	xor_ia64_2,
+	.do_3 =	xor_ia64_3,
+	.do_4 =	xor_ia64_4,
+	.do_5 =	xor_ia64_5,
+};
+
+#define XOR_TRY_TEMPLATES	xor_speed(&xor_block_ia64)
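+
+/*
+ * Illustrative call (editor's addition): XOR one buffer into another, as
+ * the RAID-5 core does through the template above.  The buffers here are
+ * placeholders; in practice the RAID code passes page-sized buffers.
+ */
+#if 0	/* example only */
+	unsigned long a[512], b[512];
+
+	xor_ia64_2(sizeof(a), a, b);	/* a[i] ^= b[i] over the range */
+#endif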
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
new file mode 100644
index 0000000..891002b
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -0,0 +1,49 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += kvm_para.h
+
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += break.h
+header-y += byteorder.h
+header-y += cmpxchg.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += fpu.h
+header-y += gcc_intrin.h
+header-y += ia64regs.h
+header-y += intel_intrin.h
+header-y += intrinsics.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += kvm_para.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += perfmon.h
+header-y += perfmon_default_smpl.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += ptrace_offsets.h
+header-y += resource.h
+header-y += rse.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += ucontext.h
+header-y += unistd.h
+header-y += ustack.h
diff --git a/arch/ia64/include/uapi/asm/auxvec.h b/arch/ia64/include/uapi/asm/auxvec.h
new file mode 100644
index 0000000..58277fc
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/auxvec.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_AUXVEC_H
+#define _ASM_IA64_AUXVEC_H
+
+/*
+ * Architecture-neutral AT_ values are in the range 0-17.  Leave some room for more of
+ * them; start the architecture-specific ones at 32.
+ */
+#define AT_SYSINFO	32
+#define AT_SYSINFO_EHDR	33
+
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
+#endif /* _ASM_IA64_AUXVEC_H */
diff --git a/arch/ia64/include/uapi/asm/bitsperlong.h b/arch/ia64/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000..ec4db3c
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_IA64_BITSPERLONG_H
+#define __ASM_IA64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_IA64_BITSPERLONG_H */
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
new file mode 100644
index 0000000..f034020
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB		0x80100
+#define __IA64_BREAK_KPROBE		0x81000 /* .. 0x81fff */
+#define __IA64_BREAK_JPROBE		0x82000
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL		0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/uapi/asm/byteorder.h b/arch/ia64/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000..a8dd735
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/byteorder.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
new file mode 100644
index 0000000..a0e3620
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -0,0 +1,154 @@
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ *	Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size)						\
+({									\
+	unsigned long __xchg_result;					\
+									\
+	switch (size) {							\
+	case 1:								\
+		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
+		break;							\
+									\
+	case 2:								\
+		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
+		break;							\
+									\
+	case 4:								\
+		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
+		break;							\
+									\
+	case 8:								\
+		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
+		break;							\
+	default:							\
+		ia64_xchg_called_with_bad_pointer();			\
+	}								\
+	__xchg_result;							\
+})
+
+#define xchg(ptr, x)							\
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size)				\
+({									\
+	__u64 _o_, _r_;							\
+									\
+	switch (size) {							\
+	case 1:								\
+		_o_ = (__u8) (long) (old);				\
+		break;							\
+	case 2:								\
+		_o_ = (__u16) (long) (old);				\
+		break;							\
+	case 4:								\
+		_o_ = (__u32) (long) (old);				\
+		break;							\
+	case 8:								\
+		_o_ = (__u64) (long) (old);				\
+		break;							\
+	default:							\
+		break;							\
+	}								\
+	switch (size) {							\
+	case 1:								\
+		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 2:								\
+		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 4:								\
+		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 8:								\
+		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
+		break;							\
+									\
+	default:							\
+		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
+		break;							\
+	}								\
+	(__typeof__(old)) _r_;						\
+})
+
+#define cmpxchg_acq(ptr, o, n)	\
+	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n)	\
+	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release semantics and did a full fence all the time.
+ * Unfortunately, this meant that a lot of badly written code that used
+ * .acq when it really wanted .rel became legacy out in the wild - so
+ * when we made a CPU that strictly honored .acq and .rel, all that code
+ * started breaking - and we had to back-pedal and keep the "legacy"
+ * behavior of a full fence :-(
+ */
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local		cmpxchg
+#define cmpxchg64_local		cmpxchg64
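+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * the classic compare-and-swap retry loop, here incrementing a counter
+ * with acquire semantics.  "example_atomic_inc" is a hypothetical helper.
+ */
+static inline void
+example_atomic_inc(unsigned int *v)
+{
+	unsigned int old;
+
+	do {
+		old = *v;
+	} while (cmpxchg_acq(v, old, old + 1) != old);
+}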
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v)						\
+do {									\
+	if (_cmpxchg_bugcheck_count-- <= 0) {				\
+		void *ip;						\
+		extern int printk(const char *fmt, ...);		\
+		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
+		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+		break;							\
+	}								\
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
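+
+/*
+ * Usage sketch (illustrative only): how the bugcheck hooks are meant to
+ * wrap a cmpxchg retry loop.  With CONFIG_IA64_DEBUG_CMPXCHG enabled the
+ * counter trips once the loop has spun 128 times; otherwise both hooks
+ * compile away.  "example_checked_inc" is a hypothetical helper.
+ */
+static inline void
+example_checked_inc(unsigned int *v)
+{
+	unsigned int old;
+	CMPXCHG_BUGCHECK_DECL
+
+	do {
+		CMPXCHG_BUGCHECK(v);
+		old = *v;
+	} while (cmpxchg_acq(v, old, old + 1) != old);
+}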
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/uapi/asm/errno.h b/arch/ia64/include/uapi/asm/errno.h
new file mode 100644
index 0000000..4c82b50
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/arch/ia64/include/uapi/asm/fcntl.h b/arch/ia64/include/uapi/asm/fcntl.h
new file mode 100644
index 0000000..7b48587
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/fcntl.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_IA64_FCNTL_H
+#define _ASM_IA64_FCNTL_H
+/*
+ * Modified 1998-2000
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#define force_o_largefile()	\
+		(personality(current->personality) != PER_LINUX32)
+
+#include <linux/personality.h>
+#include <asm-generic/fcntl.h>
+
+#endif /* _ASM_IA64_FCNTL_H */
diff --git a/arch/ia64/include/uapi/asm/fpu.h b/arch/ia64/include/uapi/asm/fpu.h
new file mode 100644
index 0000000..b6395ad
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/fpu.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_FPU_H
+#define _ASM_IA64_FPU_H
+
+/*
+ * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/types.h>
+
+/* floating point status register: */
+#define FPSR_TRAP_VD	(1 << 0)	/* invalid op trap disabled */
+#define FPSR_TRAP_DD	(1 << 1)	/* denormal trap disabled */
+#define FPSR_TRAP_ZD	(1 << 2)	/* zero-divide trap disabled */
+#define FPSR_TRAP_OD	(1 << 3)	/* overflow trap disabled */
+#define FPSR_TRAP_UD	(1 << 4)	/* underflow trap disabled */
+#define FPSR_TRAP_ID	(1 << 5)	/* inexact trap disabled */
+#define FPSR_S0(x)	((x) <<  6)
+#define FPSR_S1(x)	((x) << 19)
+#define FPSR_S2(x)	(__IA64_UL(x) << 32)
+#define FPSR_S3(x)	(__IA64_UL(x) << 45)
+
+/* floating-point status field controls: */
+#define FPSF_FTZ	(1 << 0)		/* flush-to-zero */
+#define FPSF_WRE	(1 << 1)		/* widest-range exponent */
+#define FPSF_PC(x)	(((x) & 0x3) << 2)	/* precision control */
+#define FPSF_RC(x)	(((x) & 0x3) << 4)	/* rounding control */
+#define FPSF_TD		(1 << 6)		/* trap disabled */
+
+/* floating-point status field flags: */
+#define FPSF_V		(1 <<  7)		/* invalid operation flag */
+#define FPSF_D		(1 <<  8)		/* denormal/unnormal operand flag */
+#define FPSF_Z		(1 <<  9)		/* zero divide (IEEE) flag */
+#define FPSF_O		(1 << 10)		/* overflow (IEEE) flag */
+#define FPSF_U		(1 << 11)		/* underflow (IEEE) flag */
+#define FPSF_I		(1 << 12)		/* inexact (IEEE) flag */
+
+/* floating-point rounding control: */
+#define FPRC_NEAREST	0x0
+#define FPRC_NEGINF	0x1
+#define FPRC_POSINF	0x2
+#define FPRC_TRUNC	0x3
+
+#define FPSF_DEFAULT	(FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
+
+/* This default value is the same as HP-UX uses.  Don't change it
+   without a very good reason.  */
+#define FPSR_DEFAULT	(FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD	\
+			 | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID	\
+			 | FPSR_S0 (FPSF_DEFAULT)			\
+			 | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE)	\
+			 | FPSR_S2 (FPSF_DEFAULT | FPSF_TD)		\
+			 | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
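+
+/*
+ * Composition sketch (illustrative only; FPSF_EXAMPLE_TRUNC is a
+ * hypothetical name, not an architected value): a status field that
+ * keeps traps disabled but rounds toward zero.
+ */
+#define FPSF_EXAMPLE_TRUNC	(FPSF_DEFAULT | FPSF_TD | FPSF_RC(FPRC_TRUNC))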
+
+# ifndef __ASSEMBLY__
+
+struct ia64_fpreg {
+	union {
+		unsigned long bits[2];
+		long double __dummy;	/* force 16-byte alignment */
+	} u;
+};
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_FPU_H */
diff --git a/arch/ia64/include/uapi/asm/gcc_intrin.h b/arch/ia64/include/uapi/asm/gcc_intrin.h
new file mode 100644
index 0000000..61d0d01
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/gcc_intrin.h
@@ -0,0 +1,618 @@
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+#ifndef _UAPI_ASM_IA64_GCC_INTRIN_H
+#define _UAPI_ASM_IA64_GCC_INTRIN_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* define this macro to get some asm stmts included in 'c' files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier()	asm volatile ("":::"memory")
+
+#define ia64_stop()	asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))
+
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+
+#define ia64_native_setreg(regnum, val)						\
+({										\
+	switch (regnum) {							\
+	    case _IA64_REG_PSR_L:						\
+		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
+		    break;							\
+	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
+		    asm volatile ("mov ar%0=%1" ::				\
+		    			  "i" (regnum - _IA64_REG_AR_KR0),	\
+					  "r"(val): "memory");			\
+		    break;							\
+	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
+		    asm volatile ("mov cr%0=%1" ::				\
+				          "i" (regnum - _IA64_REG_CR_DCR),	\
+					  "r"(val): "memory" );			\
+		    break;							\
+	    case _IA64_REG_SP:							\
+		    asm volatile ("mov r12=%0" ::				\
+			    		  "r"(val): "memory");			\
+		    break;							\
+	    case _IA64_REG_GP:							\
+		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
+		    break;							\
+	    default:								\
+		    ia64_bad_param_for_setreg();				\
+		    break;							\
+	}									\
+})
+
+#define ia64_native_getreg(regnum)						\
+({										\
+	__u64 ia64_intri_res;							\
+										\
+	switch (regnum) {							\
+	case _IA64_REG_GP:							\
+		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_IP:							\
+		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_PSR:							\
+		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_TP:	/* for current() */				\
+		ia64_intri_res = ia64_r13;					\
+		break;								\
+	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
+		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
+				      : "i"(regnum - _IA64_REG_AR_KR0));	\
+		break;								\
+	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
+		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
+				      : "i" (regnum - _IA64_REG_CR_DCR));	\
+		break;								\
+	case _IA64_REG_SP:							\
+		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
+		break;								\
+	default:								\
+		ia64_bad_param_for_getreg();					\
+		break;								\
+	}									\
+	ia64_intri_res;								\
+})
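+
+/*
+ * Usage sketch (illustrative only): the register numbers are
+ * compile-time constants, so each switch collapses to a single mov.
+ * "example_current_ip" is a hypothetical helper.
+ */
+static inline unsigned long
+example_current_ip(void)
+{
+	return ia64_native_getreg(_IA64_REG_IP);
+}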
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode)						\
+({								\
+	switch (mode) {						\
+	case ia64_hint_pause:					\
+		asm volatile ("hint @pause" ::: "memory");	\
+		break;						\
+	}							\
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix   8
+#define ia64_mux1_shuf  9
+#define ia64_mux1_alt  10
+#define ia64_mux1_rev  11
+
+#define ia64_mux1(x, mode)							\
+({										\
+	__u64 ia64_intri_res;							\
+										\
+	switch (mode) {								\
+	case ia64_mux1_brcst:							\
+		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_mix:							\
+		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_shuf:							\
+		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_alt:							\
+		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_rev:							\
+		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	}									\
+	ia64_intri_res;								\
+})
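+
+/*
+ * Usage sketch (illustrative only): @rev reverses the byte order of
+ * the source operand, so mux1 yields a one-instruction 64-bit byte
+ * swap.  "example_bswap64" is a hypothetical helper.
+ */
+static inline __u64
+example_bswap64(__u64 x)
+{
+	return ia64_mux1(x, ia64_mux1_rev);
+}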
+
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define ia64_popcnt(x)		__builtin_popcountl(x)
+#else
+# define ia64_popcnt(x)						\
+  ({								\
+	__u64 ia64_intri_res;					\
+	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
+								\
+	ia64_intri_res;						\
+  })
+#endif
+
+#define ia64_getf_exp(x)					\
+({								\
+	long ia64_intri_res;					\
+								\
+	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
+								\
+	ia64_intri_res;						\
+})
+
+#define ia64_shrp(a, b, count)								\
+({											\
+	__u64 ia64_intri_res;								\
+	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
+	ia64_intri_res;									\
+})
+
+#define ia64_ldfs(regnum, x)					\
+({								\
+	register double __f__ asm ("f"#regnum);			\
+	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
+})
+
+#define ia64_ldfd(regnum, x)					\
+({								\
+	register double __f__ asm ("f"#regnum);			\
+	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
+})
+
+#define ia64_ldfe(regnum, x)					\
+({								\
+	register double __f__ asm ("f"#regnum);			\
+	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
+})
+
+#define ia64_ldf8(regnum, x)					\
+({								\
+	register double __f__ asm ("f"#regnum);			\
+	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
+})
+
+#define ia64_ldf_fill(regnum, x)				\
+({								\
+	register double __f__ asm ("f"#regnum);			\
+	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
+})
+
+#define ia64_st4_rel_nta(m, val)					\
+({									\
+	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
+})
+
+#define ia64_stfs(x, regnum)						\
+({									\
+	register double __f__ asm ("f"#regnum);				\
+	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_stfd(x, regnum)						\
+({									\
+	register double __f__ asm ("f"#regnum);				\
+	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_stfe(x, regnum)						\
+({									\
+	register double __f__ asm ("f"#regnum);				\
+	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_stf8(x, regnum)						\
+({									\
+	register double __f__ asm ("f"#regnum);				\
+	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_stf_spill(x, regnum)						\
+({										\
+	register double __f__ asm ("f"#regnum);					\
+	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_fetchadd4_acq(p, inc)						\
+({										\
+										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd4_rel(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd8_acq(p, inc)						\
+({										\
+										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd8_rel(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_xchg1(ptr,x)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("xchg1 %0=[%1],%2"					\
+		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
+	ia64_intri_res;								\
+})
+
+#define ia64_xchg2(ptr,x)						\
+({									\
+	__u64 ia64_intri_res;						\
+	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
+		      : "r" (ptr), "r" (x) : "memory");			\
+	ia64_intri_res;							\
+})
+
+#define ia64_xchg4(ptr,x)						\
+({									\
+	__u64 ia64_intri_res;						\
+	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
+		      : "r" (ptr), "r" (x) : "memory");			\
+	ia64_intri_res;							\
+})
+
+#define ia64_xchg8(ptr,x)						\
+({									\
+	__u64 ia64_intri_res;						\
+	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
+		      : "r" (ptr), "r" (x) : "memory");			\
+	ia64_intri_res;							\
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+											\
+	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+											\
+	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_mf()	asm volatile ("mf" ::: "memory")
+#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_native_thash(addr)							\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory");
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
+# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
+#else
+# define ia64_dv_serialize_data()
+# define ia64_dv_serialize_instruction()
+#endif
+
+#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
+					     :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
+					     :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr)								\
+({										\
+	unsigned long ia64_pa;							\
+	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
+	ia64_pa;								\
+})
+
+#define __ia64_set_dbr(index, val)						\
+	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val)						\
+	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val)						\
+	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val)						\
+	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val)						\
+	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_native_set_rr(index, val)							\
+	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_native_get_cpuid(index)							\
+({											\
+	unsigned long ia64_intri_res;							\
+	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
+	ia64_intri_res;									\
+})
+
+#define __ia64_get_dbr(index)							\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_ibr(index)							\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_pkr(index)							\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_pmc(index)							\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+
+#define ia64_native_get_pmd(index)						\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_native_get_rr(index)						\
+({										\
+	unsigned long ia64_intri_res;						\
+	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_native_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_native_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_native_ptcga(addr, size)						\
+do {										\
+	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+	ia64_dv_serialize_data();						\
+} while (0)
+
+#define ia64_ptcl(addr, size)							\
+do {										\
+	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+	ia64_dv_serialize_data();						\
+} while (0)
+
+#define ia64_ptri(addr, size)						\
+	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size)						\
+	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ttag(addr)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+	ia64_intri_res;								\
+})
+
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none   0
+#define ia64_lfhint_nt1    1
+#define ia64_lfhint_nt2    2
+#define ia64_lfhint_nta    3
+
+#define ia64_lfetch(lfhint, y)					\
+({								\
+        switch (lfhint) {					\
+        case ia64_lfhint_none:					\
+                asm volatile ("lfetch [%0]" : : "r"(y));	\
+                break;						\
+        case ia64_lfhint_nt1:					\
+                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
+                break;						\
+        case ia64_lfhint_nt2:					\
+                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
+                break;						\
+        case ia64_lfhint_nta:					\
+                asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
+                break;						\
+        }							\
+})
+
+#define ia64_lfetch_excl(lfhint, y)					\
+({									\
+        switch (lfhint) {						\
+        case ia64_lfhint_none:						\
+                asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
+                break;							\
+        case ia64_lfhint_nt1:						\
+                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
+                break;							\
+        case ia64_lfhint_nt2:						\
+                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
+                break;							\
+        case ia64_lfhint_nta:						\
+                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
+                break;							\
+        }								\
+})
+
+#define ia64_lfetch_fault(lfhint, y)					\
+({									\
+        switch (lfhint) {						\
+        case ia64_lfhint_none:						\
+                asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
+                break;							\
+        case ia64_lfhint_nt1:						\
+                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
+                break;							\
+        case ia64_lfhint_nt2:						\
+                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
+                break;							\
+        case ia64_lfhint_nta:						\
+                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
+                break;							\
+        }								\
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y)				\
+({									\
+        switch (lfhint) {						\
+        case ia64_lfhint_none:						\
+                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
+                break;							\
+        case ia64_lfhint_nt1:						\
+                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
+                break;							\
+        case ia64_lfhint_nt2:						\
+                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
+                break;							\
+        case ia64_lfhint_nta:						\
+                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
+                break;							\
+        }								\
+})
+
+#define ia64_native_intrin_local_irq_restore(x)			\
+do {								\
+	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
+		      "(p6) ssm psr.i;"				\
+		      "(p7) rsm psr.i;;"			\
+		      "(p6) srlz.d"				\
+		      :: "r"((x)) : "p6", "p7", "memory");	\
+} while (0)
+
+#endif /* _UAPI_ASM_IA64_GCC_INTRIN_H */
diff --git a/arch/ia64/include/uapi/asm/ia64regs.h b/arch/ia64/include/uapi/asm/ia64regs.h
new file mode 100644
index 0000000..1757f1c
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ia64regs.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ *      Jun Nakajima <jun.nakajima@intel.com>
+ *      Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP		1016	/* getreg only */
+#define _IA64_REG_PSR		1019
+#define _IA64_REG_PSR_L		1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP		1025	/* R1 */
+#define _IA64_REG_R8		1032	/* R8 */
+#define _IA64_REG_R9		1033	/* R9 */
+#define _IA64_REG_SP		1036	/* R12 */
+#define _IA64_REG_TP		1037	/* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0	3072
+#define _IA64_REG_AR_KR1	3073
+#define _IA64_REG_AR_KR2	3074
+#define _IA64_REG_AR_KR3	3075
+#define _IA64_REG_AR_KR4	3076
+#define _IA64_REG_AR_KR5	3077
+#define _IA64_REG_AR_KR6	3078
+#define _IA64_REG_AR_KR7	3079
+#define _IA64_REG_AR_RSC	3088
+#define _IA64_REG_AR_BSP	3089
+#define _IA64_REG_AR_BSPSTORE	3090
+#define _IA64_REG_AR_RNAT	3091
+#define _IA64_REG_AR_FCR	3093
+#define _IA64_REG_AR_EFLAG	3096
+#define _IA64_REG_AR_CSD	3097
+#define _IA64_REG_AR_SSD	3098
+#define _IA64_REG_AR_CFLAG	3099
+#define _IA64_REG_AR_FSR	3100
+#define _IA64_REG_AR_FIR	3101
+#define _IA64_REG_AR_FDR	3102
+#define _IA64_REG_AR_CCV	3104
+#define _IA64_REG_AR_UNAT	3108
+#define _IA64_REG_AR_FPSR	3112
+#define _IA64_REG_AR_ITC	3116
+#define _IA64_REG_AR_PFS	3136
+#define _IA64_REG_AR_LC		3137
+#define _IA64_REG_AR_EC		3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR	4096
+#define _IA64_REG_CR_ITM	4097
+#define _IA64_REG_CR_IVA	4098
+#define _IA64_REG_CR_PTA	4104
+#define _IA64_REG_CR_IPSR	4112
+#define _IA64_REG_CR_ISR	4113
+#define _IA64_REG_CR_IIP	4115
+#define _IA64_REG_CR_IFA	4116
+#define _IA64_REG_CR_ITIR	4117
+#define _IA64_REG_CR_IIPA	4118
+#define _IA64_REG_CR_IFS	4119
+#define _IA64_REG_CR_IIM	4120
+#define _IA64_REG_CR_IHA	4121
+#define _IA64_REG_CR_LID	4160
+#define _IA64_REG_CR_IVR	4161	/* getreg only */
+#define _IA64_REG_CR_TPR	4162
+#define _IA64_REG_CR_EOI	4163
+#define _IA64_REG_CR_IRR0	4164	/* getreg only */
+#define _IA64_REG_CR_IRR1	4165	/* getreg only */
+#define _IA64_REG_CR_IRR2	4166	/* getreg only */
+#define _IA64_REG_CR_IRR3	4167	/* getreg only */
+#define _IA64_REG_CR_ITV	4168
+#define _IA64_REG_CR_PMV	4169
+#define _IA64_REG_CR_CMCV	4170
+#define _IA64_REG_CR_LRR0	4176
+#define _IA64_REG_CR_LRR1	4177
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
+#define _IA64_REG_INDR_DBR	9001
+#define _IA64_REG_INDR_IBR	9002
+#define _IA64_REG_INDR_PKR	9003
+#define _IA64_REG_INDR_PMC	9004
+#define _IA64_REG_INDR_PMD	9005
+#define _IA64_REG_INDR_RR	9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
diff --git a/arch/ia64/include/uapi/asm/intel_intrin.h b/arch/ia64/include/uapi/asm/intel_intrin.h
new file mode 100644
index 0000000..53cec57
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/intel_intrin.h
@@ -0,0 +1,161 @@
+#ifndef _ASM_IA64_INTEL_INTRIN_H
+#define _ASM_IA64_INTEL_INTRIN_H
+/*
+ * Intel Compiler Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
+ *
+ */
+#include <ia64intrin.h>
+
+#define ia64_barrier()		__memory_barrier()
+
+#define ia64_stop()	/* Nothing: as of now, a stop bit is generated
+			 * for each intrinsic.
+			 */
+
+#define ia64_native_getreg	__getReg
+#define ia64_native_setreg	__setReg
+
+#define ia64_hint		__hint
+#define ia64_hint_pause		__hint_pause
+
+#define ia64_mux1_brcst		_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf		_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev
+
+#define ia64_mux1(x,v)		_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
+#define ia64_popcnt		_m64_popcnt
+#define ia64_getf_exp		__getf_exp
+#define ia64_shrp		_m64_shrp
+
+#define ia64_tpa		__tpa
+#define ia64_invala		__invala
+#define ia64_invala_gr		__invala_gr
+#define ia64_invala_fr		__invala_fr
+#define ia64_nop		__nop
+#define ia64_sum		__sum
+#define ia64_native_ssm		__ssm
+#define ia64_rum		__rum
+#define ia64_native_rsm		__rsm
+#define ia64_native_fc 		__fc
+
+#define ia64_ldfs		__ldfs
+#define ia64_ldfd		__ldfd
+#define ia64_ldfe		__ldfe
+#define ia64_ldf8		__ldf8
+#define ia64_ldf_fill		__ldf_fill
+
+#define ia64_stfs		__stfs
+#define ia64_stfd		__stfd
+#define ia64_stfe		__stfe
+#define ia64_stf8		__stf8
+#define ia64_stf_spill		__stf_spill
+
+#define ia64_mf			__mf
+#define ia64_mfa		__mfa
+
+#define ia64_fetchadd4_acq	__fetchadd4_acq
+#define ia64_fetchadd4_rel	__fetchadd4_rel
+#define ia64_fetchadd8_acq	__fetchadd8_acq
+#define ia64_fetchadd8_rel	__fetchadd8_rel
+
+#define ia64_xchg1		_InterlockedExchange8
+#define ia64_xchg2		_InterlockedExchange16
+#define ia64_xchg4		_InterlockedExchange
+#define ia64_xchg8		_InterlockedExchange64
+
+#define ia64_cmpxchg1_rel	_InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq	_InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel	_InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq	_InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel	_InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq	_InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel	_InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq	_InterlockedCompareExchange64_acq
+
+#define __ia64_set_dbr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_DBR, index, val)
+#define ia64_set_ibr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_IBR, index, val)
+#define ia64_set_pkr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PKR, index, val)
+#define ia64_set_pmc(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PMC, index, val)
+#define ia64_set_pmd(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PMD, index, val)
+#define ia64_native_set_rr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_RR, index, val)
+
+#define ia64_native_get_cpuid(index)	\
+		__getIndReg(_IA64_REG_INDR_CPUID, index)
+#define __ia64_get_dbr(index)		__getIndReg(_IA64_REG_INDR_DBR, index)
+#define ia64_get_ibr(index)		__getIndReg(_IA64_REG_INDR_IBR, index)
+#define ia64_get_pkr(index)		__getIndReg(_IA64_REG_INDR_PKR, index)
+#define ia64_get_pmc(index)		__getIndReg(_IA64_REG_INDR_PMC, index)
+#define ia64_native_get_pmd(index)	__getIndReg(_IA64_REG_INDR_PMD, index)
+#define ia64_native_get_rr(index)	__getIndReg(_IA64_REG_INDR_RR, index)
+
+#define ia64_srlz_d		__dsrlz
+#define ia64_srlz_i		__isrlz
+
+#define ia64_dv_serialize_data()
+#define ia64_dv_serialize_instruction()
+
+#define ia64_st1_rel		__st1_rel
+#define ia64_st2_rel		__st2_rel
+#define ia64_st4_rel		__st4_rel
+#define ia64_st8_rel		__st8_rel
+
+/* FIXME: need st4.rel.nta intrinsic */
+#define ia64_st4_rel_nta	__st4_rel
+
+#define ia64_ld1_acq		__ld1_acq
+#define ia64_ld2_acq		__ld2_acq
+#define ia64_ld4_acq		__ld4_acq
+#define ia64_ld8_acq		__ld8_acq
+
+#define ia64_sync_i		__synci
+#define ia64_native_thash	__thash
+#define ia64_native_ttag	__ttag
+#define ia64_itcd		__itcd
+#define ia64_itci		__itci
+#define ia64_itrd		__itrd
+#define ia64_itri		__itri
+#define ia64_ptce		__ptce
+#define ia64_ptcl		__ptcl
+#define ia64_native_ptcg	__ptcg
+#define ia64_native_ptcga	__ptcga
+#define ia64_ptri		__ptri
+#define ia64_ptrd		__ptrd
+#define ia64_dep_mi		_m64_dep_mi
+
+/* Values for lfhint in __lfetch and __lfetch_fault */
+
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1		__lfhint_nt1
+#define ia64_lfhint_nt2		__lfhint_nt2
+#define ia64_lfhint_nta		__lfhint_nta
+
+#define ia64_lfetch		__lfetch
+#define ia64_lfetch_excl	__lfetch_excl
+#define ia64_lfetch_fault	__lfetch_fault
+#define ia64_lfetch_fault_excl	__lfetch_fault_excl
+
+#define ia64_native_intrin_local_irq_restore(x)		\
+do {							\
+	if ((x) != 0) {					\
+		ia64_native_ssm(IA64_PSR_I);		\
+		ia64_srlz_d();				\
+	} else {					\
+		ia64_native_rsm(IA64_PSR_I);		\
+	}						\
+} while (0)
+
+#define __builtin_trap()	__break(0);
+
+#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/arch/ia64/include/uapi/asm/intrinsics.h b/arch/ia64/include/uapi/asm/intrinsics.h
new file mode 100644
index 0000000..5829978
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/intrinsics.h
@@ -0,0 +1,124 @@
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _UAPI_ASM_IA64_INTRINSICS_H
+#define _UAPI_ASM_IA64_INTRINSICS_H
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+#include <asm/cmpxchg.h>
+
+#define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
+
+#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
+do {									\
+	ia64_native_set_rr(0x0000000000000000UL, (val0));		\
+	ia64_native_set_rr(0x2000000000000000UL, (val1));		\
+	ia64_native_set_rr(0x4000000000000000UL, (val2));		\
+	ia64_native_set_rr(0x6000000000000000UL, (val3));		\
+	ia64_native_set_rr(0x8000000000000000UL, (val4));		\
+} while (0)
+
+/*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+ */
+extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
+extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
+
+#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
+({										\
+	switch (sz) {								\
+	      case 4:								\
+	        tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);		\
+		break;								\
+										\
+	      case 8:								\
+	        tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);		\
+		break;								\
+										\
+	      default:								\
+		__bad_size_for_ia64_fetch_and_add();				\
+	}									\
+})
+
+#define ia64_fetchadd(i,v,sem)								\
+({											\
+	__u64 _tmp;									\
+	volatile __typeof__(*(v)) *_v = (v);						\
+	/* Can't use a switch () here: gcc isn't always smart enough for that... */	\
+	if ((i) == -16)									\
+		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);			\
+	else if ((i) == -8)								\
+		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);				\
+	else if ((i) == -4)								\
+		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);				\
+	else if ((i) == -1)								\
+		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);				\
+	else if ((i) == 1)								\
+		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);				\
+	else if ((i) == 4)								\
+		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);				\
+	else if ((i) == 8)								\
+		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);				\
+	else if ((i) == 16)								\
+		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);				\
+	else										\
+		_tmp = __bad_increment_for_ia64_fetch_and_add();			\
+	(__typeof__(*(v))) (_tmp);	/* return old value */				\
+})
+
+#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */
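+
+/*
+ * Usage sketch (illustrative only): ia64_fetchadd() hands back the
+ * value before the add, ia64_fetch_and_add() the value after it.
+ * "example_get_ref" is a hypothetical helper.
+ */
+static inline unsigned long
+example_get_ref(unsigned long *refcount)
+{
+	return ia64_fetch_and_add(1, refcount);	/* new reference count */
+}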
+
+#endif
+
+
+#ifndef __ASSEMBLY__
+
+#define IA64_INTRINSIC_API(name)	ia64_native_ ## name
+#define IA64_INTRINSIC_MACRO(name)	ia64_native_ ## name
+
+
+/************************************************/
+/* Instructions paravirtualized for correctness */
+/************************************************/
+/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ */
+#define ia64_fc				IA64_INTRINSIC_API(fc)
+#define ia64_thash			IA64_INTRINSIC_API(thash)
+#define ia64_get_cpuid			IA64_INTRINSIC_API(get_cpuid)
+#define ia64_get_pmd			IA64_INTRINSIC_API(get_pmd)
+
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+#define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
+#define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
+#define ia64_getreg			IA64_INTRINSIC_MACRO(getreg)
+#define ia64_setreg			IA64_INTRINSIC_API(setreg)
+#define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
+#define ia64_get_rr			IA64_INTRINSIC_API(get_rr)
+#define ia64_ptcga			IA64_INTRINSIC_API(ptcga)
+#define ia64_get_psr_i			IA64_INTRINSIC_API(get_psr_i)
+#define ia64_intrin_local_irq_restore	\
+	IA64_INTRINSIC_API(intrin_local_irq_restore)
+#define ia64_set_rr0_to_rr4		IA64_INTRINSIC_API(set_rr0_to_rr4)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_IA64_INTRINSICS_H */
diff --git a/arch/ia64/include/uapi/asm/ioctl.h b/arch/ia64/include/uapi/asm/ioctl.h
new file mode 100644
index 0000000..b279fe0
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ioctl.h
@@ -0,0 +1 @@
+#include <asm-generic/ioctl.h>
diff --git a/arch/ia64/include/uapi/asm/ioctls.h b/arch/ia64/include/uapi/asm/ioctls.h
new file mode 100644
index 0000000..f3aab55
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ioctls.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_IOCTLS_H
+#define _ASM_IA64_IOCTLS_H
+
+#include <asm-generic/ioctls.h>
+
+#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/arch/ia64/include/uapi/asm/ipcbuf.h b/arch/ia64/include/uapi/asm/ipcbuf.h
new file mode 100644
index 0000000..84c7e51
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ipcbuf.h
@@ -0,0 +1 @@
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/ia64/include/uapi/asm/mman.h b/arch/ia64/include/uapi/asm/mman.h
new file mode 100644
index 0000000..8740819
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/mman.h
@@ -0,0 +1,16 @@
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_MMAN_H
+#define _UAPI_ASM_IA64_MMAN_H
+
+
+#include <asm-generic/mman.h>
+
+#define MAP_GROWSUP	0x0200		/* register stack-like segment */
+
+
+#endif /* _UAPI_ASM_IA64_MMAN_H */
diff --git a/arch/ia64/include/uapi/asm/msgbuf.h b/arch/ia64/include/uapi/asm/msgbuf.h
new file mode 100644
index 0000000..6c64c0d
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/msgbuf.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_MSGBUF_H
+#define _ASM_IA64_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for the IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused1;
+	unsigned long  __unused2;
+};
+
+#endif /* _ASM_IA64_MSGBUF_H */
diff --git a/arch/ia64/include/uapi/asm/param.h b/arch/ia64/include/uapi/asm/param.h
new file mode 100644
index 0000000..d7da41d
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/param.h
@@ -0,0 +1,29 @@
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_PARAM_H
+#define _UAPI_ASM_IA64_PARAM_H
+
+
+#define EXEC_PAGESIZE	65536
+
+#ifndef NOGROUP
+# define NOGROUP	(-1)
+#endif
+
+#define MAXHOSTNAMELEN	64	/* max length of hostname */
+
+#ifndef __KERNEL__
+   /*
+    * Technically, this is wrong, but some old apps still refer to it.  The proper way to
+    * get the HZ value is via sysconf(_SC_CLK_TCK).
+    */
+# define HZ 1024
+#endif
+
+#endif /* _UAPI_ASM_IA64_PARAM_H */
diff --git a/arch/ia64/include/uapi/asm/perfmon.h b/arch/ia64/include/uapi/asm/perfmon.h
new file mode 100644
index 0000000..1a10a2d
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/perfmon.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *               Stephane Eranian <eranian@hpl.hp.com>
+ */
+
+#ifndef _UAPI_ASM_IA64_PERFMON_H
+#define _UAPI_ASM_IA64_PERFMON_H
+
+/*
+ * perfmon commands supported on all CPU models
+ */
+#define PFM_WRITE_PMCS		0x01
+#define PFM_WRITE_PMDS		0x02
+#define PFM_READ_PMDS		0x03
+#define PFM_STOP		0x04
+#define PFM_START		0x05
+#define PFM_ENABLE		0x06 /* obsolete */
+#define PFM_DISABLE		0x07 /* obsolete */
+#define PFM_CREATE_CONTEXT	0x08
+#define PFM_DESTROY_CONTEXT	0x09 /* obsolete; use close() */
+#define PFM_RESTART		0x0a
+#define PFM_PROTECT_CONTEXT	0x0b /* obsolete */
+#define PFM_GET_FEATURES	0x0c
+#define PFM_DEBUG		0x0d
+#define PFM_UNPROTECT_CONTEXT	0x0e /* obsolete */
+#define PFM_GET_PMC_RESET_VAL	0x0f
+#define PFM_LOAD_CONTEXT	0x10
+#define PFM_UNLOAD_CONTEXT	0x11
+
+/*
+ * PMU model specific commands (may not be supported on all PMU models)
+ */
+#define PFM_WRITE_IBRS		0x20
+#define PFM_WRITE_DBRS		0x21
+
+/*
+ * context flags
+ */
+#define PFM_FL_NOTIFY_BLOCK    	 0x01	/* block task on user level notifications */
+#define PFM_FL_SYSTEM_WIDE	 0x02	/* create a system wide context */
+#define PFM_FL_OVFL_NO_MSG	 0x80   /* do not post overflow/end messages for notification */
+
+/*
+ * event set flags
+ */
+#define PFM_SETFL_EXCL_IDLE      0x01   /* exclude idle task (syswide only) XXX: DO NOT USE YET */
+
+/*
+ * PMC flags
+ */
+#define PFM_REGFL_OVFL_NOTIFY	0x1	/* send notification on overflow */
+#define PFM_REGFL_RANDOM	0x2	/* randomize sampling interval   */
+
+/*
+ * PMD/PMC/IBR/DBR return flags (ignored on input)
+ *
+ * Those flags are used on output and must be checked in case EAGAIN is returned
+ * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
+ */
+#define PFM_REG_RETFL_NOTAVAIL	(1UL<<31) /* set if register is implemented but not available */
+#define PFM_REG_RETFL_EINVAL	(1UL<<30) /* set if register entry is invalid */
+#define PFM_REG_RETFL_MASK	(PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
+
+#define PFM_REG_HAS_ERROR(flag)	(((flag) & PFM_REG_RETFL_MASK) != 0)
+
+typedef unsigned char pfm_uuid_t[16];	/* custom sampling buffer identifier type */
+
+/*
+ * Request structure used to define a context
+ */
+typedef struct {
+	pfm_uuid_t     ctx_smpl_buf_id;	 /* which buffer format to use (if needed) */
+	unsigned long  ctx_flags;	 /* noblock/block */
+	unsigned short ctx_nextra_sets;	 /* number of extra event sets (you always get 1) */
+	unsigned short ctx_reserved1;	 /* for future use */
+	int	       ctx_fd;		 /* return arg: unique identification for context */
+	void	       *ctx_smpl_vaddr;	 /* return arg: virtual address of sampling buffer, if used */
+	unsigned long  ctx_reserved2[11];/* for future use */
+} pfarg_context_t;
+
+/*
+ * Request structure used to write/read a PMC or PMD
+ */
+typedef struct {
+	unsigned int	reg_num;	   /* which register */
+	unsigned short	reg_set;	   /* event set for this register */
+	unsigned short	reg_reserved1;	   /* for future use */
+
+	unsigned long	reg_value;	   /* initial pmc/pmd value */
+	unsigned long	reg_flags;	   /* input: pmc/pmd flags, return: reg error */
+
+	unsigned long	reg_long_reset;	   /* reset after buffer overflow notification */
+	unsigned long	reg_short_reset;   /* reset after counter overflow */
+
+	unsigned long	reg_reset_pmds[4]; /* which other counters to reset on overflow */
+	unsigned long	reg_random_seed;   /* seed value when randomization is used */
+	unsigned long	reg_random_mask;   /* bitmask used to limit random value */
+	unsigned long   reg_last_reset_val;/* return: PMD last reset value */
+
+	unsigned long	reg_smpl_pmds[4];  /* which pmds are accessed when PMC overflows */
+	unsigned long	reg_smpl_eventid;  /* opaque sampling event identifier */
+
+	unsigned long   reg_reserved2[3];   /* for future use */
+} pfarg_reg_t;
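+
+/*
+ * Usage sketch (illustrative only): after a perfmon call fails with
+ * EAGAIN, scan the request array for the entries the kernel flagged.
+ * "example_first_bad_reg" is a hypothetical helper.
+ */
+static inline int
+example_first_bad_reg(pfarg_reg_t *req, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (PFM_REG_HAS_ERROR(req[i].reg_flags))
+			return i;
+	return -1;	/* no flagged entries */
+}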
+
+typedef struct {
+	unsigned int	dbreg_num;		/* which debug register */
+	unsigned short	dbreg_set;		/* event set for this register */
+	unsigned short	dbreg_reserved1;	/* for future use */
+	unsigned long	dbreg_value;		/* value for debug register */
+	unsigned long	dbreg_flags;		/* return: dbreg error */
+	unsigned long	dbreg_reserved2[1];	/* for future use */
+} pfarg_dbreg_t;
+
+typedef struct {
+	unsigned int	ft_version;	/* perfmon: major [16-31], minor [0-15] */
+	unsigned int	ft_reserved;	/* reserved for future use */
+	unsigned long	reserved[4];	/* for future use */
+} pfarg_features_t;
+
+typedef struct {
+	pid_t		load_pid;	   /* process to load the context into */
+	unsigned short	load_set;	   /* first event set to load */
+	unsigned short	load_reserved1;	   /* for future use */
+	unsigned long	load_reserved2[3]; /* for future use */
+} pfarg_load_t;
+
+typedef struct {
+	int		msg_type;		/* generic message header */
+	int		msg_ctx_fd;		/* generic message header */
+	unsigned long	msg_ovfl_pmds[4];	/* which PMDs overflowed */
+	unsigned short  msg_active_set;		/* active set at the time of overflow */
+	unsigned short  msg_reserved1;		/* for future use */
+	unsigned int    msg_reserved2;		/* for future use */
+	unsigned long	msg_tstamp;		/* for perf tuning/debug */
+} pfm_ovfl_msg_t;
+
+typedef struct {
+	int		msg_type;		/* generic message header */
+	int		msg_ctx_fd;		/* generic message header */
+	unsigned long	msg_tstamp;		/* for perf tuning */
+} pfm_end_msg_t;
+
+typedef struct {
+	int		msg_type;		/* type of the message */
+	int		msg_ctx_fd;		/* unique identifier for the context */
+	unsigned long	msg_tstamp;		/* for perf tuning */
+} pfm_gen_msg_t;
+
+#define PFM_MSG_OVFL	1	/* an overflow happened */
+#define PFM_MSG_END	2	/* task to which context was attached ended */
+
+typedef union {
+	pfm_ovfl_msg_t	pfm_ovfl_msg;
+	pfm_end_msg_t	pfm_end_msg;
+	pfm_gen_msg_t	pfm_gen_msg;
+} pfm_msg_t;
+
+/*
+ * Define the version numbers for both perfmon as a whole and the sampling buffer format.
+ */
+#define PFM_VERSION_MAJ		 2U
+#define PFM_VERSION_MIN		 0U
+#define PFM_VERSION		 (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
+#define PFM_VERSION_MAJOR(x)	 (((x)>>16) & 0xffff)
+#define PFM_VERSION_MINOR(x)	 ((x) & 0xffff)
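+
+/*
+ * Usage sketch (illustrative only): a caller compiled against this
+ * header can sanity-check the major version reported in
+ * pfarg_features_t.ft_version.  "example_version_ok" is hypothetical.
+ */
+static inline int
+example_version_ok(unsigned int ft_version)
+{
+	return PFM_VERSION_MAJOR(ft_version) == PFM_VERSION_MAJ;
+}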
+
+
+/*
+ * miscellaneous architected definitions
+ */
+#define PMU_FIRST_COUNTER	4	/* first counting monitor (PMC/PMD) */
+#define PMU_MAX_PMCS		256	/* maximum architected number of PMC registers */
+#define PMU_MAX_PMDS		256	/* maximum architected number of PMD registers */
+
+
+#endif /* _UAPI_ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
new file mode 100644
index 0000000..a2d560c
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *               Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This file implements the default sampling buffer format
+ * for Linux/ia64 perfmon subsystem.
+ */
+#ifndef __PERFMON_DEFAULT_SMPL_H__
+#define __PERFMON_DEFAULT_SMPL_H__ 1
+
+#define PFM_DEFAULT_SMPL_UUID { \
+		0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
+
+/*
+ * format specific parameters (passed at context creation)
+ */
+typedef struct {
+	unsigned long buf_size;		/* size of the buffer in bytes */
+	unsigned int  flags;		/* buffer specific flags */
+	unsigned int  res1;		/* for future use */
+	unsigned long reserved[2];	/* for future use */
+} pfm_default_smpl_arg_t;
+
+/*
+ * combined context+format specific structure. Can be passed
+ * to PFM_CONTEXT_CREATE
+ */
+typedef struct {
+	pfarg_context_t		ctx_arg;
+	pfm_default_smpl_arg_t	buf_arg;
+} pfm_default_smpl_ctx_arg_t;
+
+/*
+ * This header is at the beginning of the sampling buffer returned to the user.
+ * It is directly followed by the first record.
+ */
+typedef struct {
+	unsigned long	hdr_count;		/* how many valid entries */
+	unsigned long	hdr_cur_offs;		/* current offset from top of buffer */
+	unsigned long	hdr_reserved2;		/* reserved for future use */
+
+	unsigned long	hdr_overflows;		/* how many times the buffer overflowed */
+	unsigned long   hdr_buf_size;		/* how many bytes in the buffer */
+
+	unsigned int	hdr_version;		/* contains perfmon version (smpl format diffs) */
+	unsigned int	hdr_reserved1;		/* for future use */
+	unsigned long	hdr_reserved[10];	/* for future use */
+} pfm_default_smpl_hdr_t;
+
+/*
+ * Entry header in the sampling buffer.  The header is directly followed
+ * by the values of the PMD registers of interest, saved in increasing
+ * index order: PMD4, PMD5, and so on.  How many PMDs are present depends
+ * on how the session was programmed.
+ *
+ * In the case where multiple counters overflow at the same time, multiple
+ * entries are written consecutively.
+ *
+ * The last_reset_val member indicates the initial value of the overflowed PMD.
+ */
+typedef struct {
+        int             pid;                    /* thread id (for NPTL, this is gettid()) */
+        unsigned char   reserved1[3];           /* reserved for future use */
+        unsigned char   ovfl_pmd;               /* index of overflowed PMD */
+
+        unsigned long   last_reset_val;         /* initial value of overflowed PMD */
+        unsigned long   ip;                     /* where the overflow interrupt happened */
+        unsigned long   tstamp;                 /* ar.itc when entering perfmon intr. handler */
+
+        unsigned short  cpu;                    /* cpu on which the overflow occurred */
+        unsigned short  set;                    /* event set active when overflow occurred   */
+        int    		tgid;              	/* thread group id (for NPTL, this is getpid()) */
+} pfm_default_smpl_entry_t;
+
+#define PFM_DEFAULT_MAX_PMDS		64 /* how many PMDs the data structures support (bits in an unsigned long) */
+#define PFM_DEFAULT_MAX_ENTRY_SIZE	(sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
+#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE	(sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
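+
+/*
+ * Usage sketch (illustrative only): the first record sits immediately
+ * after the buffer header; each record is the fixed entry header
+ * followed by the sampled PMD values.  "example_first_entry" is a
+ * hypothetical helper.
+ */
+static inline pfm_default_smpl_entry_t *
+example_first_entry(pfm_default_smpl_hdr_t *hdr)
+{
+	return (pfm_default_smpl_entry_t *)(hdr + 1);
+}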
+
+#define PFM_DEFAULT_SMPL_VERSION_MAJ	2U
+#define PFM_DEFAULT_SMPL_VERSION_MIN	0U
+#define PFM_DEFAULT_SMPL_VERSION	(((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
+
+#endif /* __PERFMON_DEFAULT_SMPL_H__ */
diff --git a/arch/ia64/include/uapi/asm/poll.h b/arch/ia64/include/uapi/asm/poll.h
new file mode 100644
index 0000000..c98509d
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/poll.h
@@ -0,0 +1 @@
+#include <asm-generic/poll.h>
diff --git a/arch/ia64/include/uapi/asm/posix_types.h b/arch/ia64/include/uapi/asm/posix_types.h
new file mode 100644
index 0000000..99ee1d6
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/posix_types.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_IA64_POSIX_TYPES_H
+#define _ASM_IA64_POSIX_TYPES_H
+
+typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/arch/ia64/include/uapi/asm/ptrace.h b/arch/ia64/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000..0a02f63
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ptrace.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ *	Suresh Siddha <suresh.b.siddha@intel.com>
+ *	Fenghua Yu <fenghua.yu@intel.com>
+ *	Arun Sharma <arun.sharma@intel.com>
+ *
+ * 12/07/98	S. Eranian	added pt_regs & switch_stack
+ * 12/21/98	D. Mosberger	updated to match latest code
+ *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
+ *
+ */
+#ifndef _UAPI_ASM_IA64_PTRACE_H
+#define _UAPI_ASM_IA64_PTRACE_H
+
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ *            +----------------------+	-------	IA64_STK_OFFSET
+ *     	      |			     |	 ^
+ *            | struct pt_regs       |	 |
+ *	      |			     |	 |
+ *            +----------------------+	 |
+ *	      |			     |	 |
+ *     	      |	   memory stack	     |	 |
+ *	      |	(growing downwards)  |	 |
+ *	      //.....................//	 |
+ *					 |
+ *	      //.....................//	 |
+ *	      |			     |	 |
+ *            +----------------------+	 |
+ *            | struct switch_stack  |	 |
+ *	      |			     |	 |
+ *	      +----------------------+	 |
+ *	      |			     |	 |
+ *	      //.....................//	 |
+ *					 |
+ *	      //.....................//	 |
+ *	      |			     |	 |
+ *	      |	 register stack	     |	 |
+ *	      |	(growing upwards)    |	 |
+ *            |			     |	 |
+ *	      +----------------------+	 |  ---	IA64_RBS_OFFSET
+ *            |  struct thread_info  |	 |  ^
+ *	      +----------------------+	 |  |
+ *	      |			     |	 |  |
+ *            |  struct task_struct  |	 |  |
+ * current -> |			     |   |  |
+ *	      +----------------------+ -------
+ *
+ * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+
+#include <asm/fpu.h>
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating-point registers because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+struct pt_regs {
+	/* The following registers are saved by SAVE_MIN: */
+	unsigned long b6;		/* scratch */
+	unsigned long b7;		/* scratch */
+
+	unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
+	unsigned long ar_ssd;           /* reserved for future use (scratch) */
+
+	unsigned long r8;		/* scratch (return value register 0) */
+	unsigned long r9;		/* scratch (return value register 1) */
+	unsigned long r10;		/* scratch (return value register 2) */
+	unsigned long r11;		/* scratch (return value register 3) */
+
+	unsigned long cr_ipsr;		/* interrupted task's psr */
+	unsigned long cr_iip;		/* interrupted task's instruction pointer */
+	/*
+	 * interrupted task's function state; if bit 63 is cleared, it
+	 * contains syscall's ar.pfs.pfm:
+	 */
+	unsigned long cr_ifs;
+
+	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
+	unsigned long ar_pfs;		/* prev function state  */
+	unsigned long ar_rsc;		/* RSE configuration */
+	/* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
+	unsigned long ar_rnat;		/* RSE NaT */
+	unsigned long ar_bspstore;	/* RSE bspstore */
+
+	unsigned long pr;		/* 64 predicate registers (1 bit each) */
+	unsigned long b0;		/* return pointer (bp) */
+	unsigned long loadrs;		/* size of dirty partition << 16 */
+
+	unsigned long r1;		/* the gp pointer */
+	unsigned long r12;		/* interrupted task's memory stack pointer */
+	unsigned long r13;		/* thread pointer */
+
+	unsigned long ar_fpsr;		/* floating point status (preserved) */
+	unsigned long r15;		/* scratch */
+
+	/* The remaining registers are NOT saved for system calls.  */
+
+	unsigned long r14;		/* scratch */
+	unsigned long r2;		/* scratch */
+	unsigned long r3;		/* scratch */
+
+	/* The following registers are saved by SAVE_REST: */
+	unsigned long r16;		/* scratch */
+	unsigned long r17;		/* scratch */
+	unsigned long r18;		/* scratch */
+	unsigned long r19;		/* scratch */
+	unsigned long r20;		/* scratch */
+	unsigned long r21;		/* scratch */
+	unsigned long r22;		/* scratch */
+	unsigned long r23;		/* scratch */
+	unsigned long r24;		/* scratch */
+	unsigned long r25;		/* scratch */
+	unsigned long r26;		/* scratch */
+	unsigned long r27;		/* scratch */
+	unsigned long r28;		/* scratch */
+	unsigned long r29;		/* scratch */
+	unsigned long r30;		/* scratch */
+	unsigned long r31;		/* scratch */
+
+	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
+
+	/*
+	 * Floating point registers that the kernel considers scratch:
+	 */
+	struct ia64_fpreg f6;		/* scratch */
+	struct ia64_fpreg f7;		/* scratch */
+	struct ia64_fpreg f8;		/* scratch */
+	struct ia64_fpreg f9;		/* scratch */
+	struct ia64_fpreg f10;		/* scratch */
+	struct ia64_fpreg f11;		/* scratch */
+};
+
+/*
+ * This structure contains the additional registers that need to be
+ * preserved across a context switch.  This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+	unsigned long caller_unat;	/* user NaT collection register (preserved) */
+	unsigned long ar_fpsr;		/* floating-point status register */
+
+	struct ia64_fpreg f2;		/* preserved */
+	struct ia64_fpreg f3;		/* preserved */
+	struct ia64_fpreg f4;		/* preserved */
+	struct ia64_fpreg f5;		/* preserved */
+
+	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
+	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
+	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
+	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
+	struct ia64_fpreg f16;		/* preserved */
+	struct ia64_fpreg f17;		/* preserved */
+	struct ia64_fpreg f18;		/* preserved */
+	struct ia64_fpreg f19;		/* preserved */
+	struct ia64_fpreg f20;		/* preserved */
+	struct ia64_fpreg f21;		/* preserved */
+	struct ia64_fpreg f22;		/* preserved */
+	struct ia64_fpreg f23;		/* preserved */
+	struct ia64_fpreg f24;		/* preserved */
+	struct ia64_fpreg f25;		/* preserved */
+	struct ia64_fpreg f26;		/* preserved */
+	struct ia64_fpreg f27;		/* preserved */
+	struct ia64_fpreg f28;		/* preserved */
+	struct ia64_fpreg f29;		/* preserved */
+	struct ia64_fpreg f30;		/* preserved */
+	struct ia64_fpreg f31;		/* preserved */
+
+	unsigned long r4;		/* preserved */
+	unsigned long r5;		/* preserved */
+	unsigned long r6;		/* preserved */
+	unsigned long r7;		/* preserved */
+
+	unsigned long b0;		/* so we can force a direct return in copy_thread */
+	unsigned long b1;
+	unsigned long b2;
+	unsigned long b3;
+	unsigned long b4;
+	unsigned long b5;
+
+	unsigned long ar_pfs;		/* previous function state */
+	unsigned long ar_lc;		/* loop counter (preserved) */
+	unsigned long ar_unat;		/* NaT bits for r4-r7 */
+	unsigned long ar_rnat;		/* RSE NaT collection register */
+	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
+	unsigned long pr;		/* 64 predicate registers (1 bit each) */
+};
+
+
+/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
+struct pt_all_user_regs {
+	unsigned long nat;
+	unsigned long cr_iip;
+	unsigned long cfm;
+	unsigned long cr_ipsr;
+	unsigned long pr;
+
+	unsigned long gr[32];
+	unsigned long br[8];
+	unsigned long ar[128];
+	struct ia64_fpreg fr[128];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* indices to application-registers array in pt_all_user_regs */
+#define PT_AUR_RSC	16
+#define PT_AUR_BSP	17
+#define PT_AUR_BSPSTORE	18
+#define PT_AUR_RNAT	19
+#define PT_AUR_CCV	32
+#define PT_AUR_UNAT	36
+#define PT_AUR_FPSR	40
+#define PT_AUR_PFS	64
+#define PT_AUR_LC	65
+#define PT_AUR_EC	66
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
+#define PTRACE_OLD_GETSIGINFO	13	/* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_OLD_SETSIGINFO	14	/* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_GETREGS		18	/* get all registers (pt_all_user_regs) in one shot */
+#define PTRACE_SETREGS		19	/* set all registers (pt_all_user_regs) in one shot */
+
+#define PTRACE_OLDSETOPTIONS	21
+
+#endif /* _UAPI_ASM_IA64_PTRACE_H */
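
As an illustration of the one-shot interface above, a minimal tracer sketch (hypothetical user-space code, not part of this header; it assumes a stopped tracee, uses the PTRACE_GETREGS/PTRACE_SETREGS request values defined here, and omits error handling):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>

	/* Fetch every register in one shot, zero the first syscall
	 * return-value register (gr[8]), and write everything back. */
	static void clear_r8(pid_t pid)
	{
		struct pt_all_user_regs regs;

		ptrace(PTRACE_GETREGS, pid, 0, &regs);
		regs.gr[8] = 0;
		ptrace(PTRACE_SETREGS, pid, 0, &regs);
	}
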
diff --git a/arch/ia64/include/uapi/asm/ptrace_offsets.h b/arch/ia64/include/uapi/asm/ptrace_offsets.h
new file mode 100644
index 0000000..b712773
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ptrace_offsets.h
@@ -0,0 +1,268 @@
+#ifndef _ASM_IA64_PTRACE_OFFSETS_H
+#define _ASM_IA64_PTRACE_OFFSETS_H
+
+/*
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
+ * virtual structure that would have the following definition:
+ *
+ *	struct uarea {
+ *		struct ia64_fpreg fph[96];		// f32-f127
+ *		unsigned long nat_bits;
+ *		unsigned long empty1;
+ *		struct ia64_fpreg f2;			// f2-f5
+ *			:
+ *		struct ia64_fpreg f5;
+ *		struct ia64_fpreg f10;			// f10-f31
+ *			:
+ *		struct ia64_fpreg f31;
+ *		unsigned long r4;			// r4-r7
+ *			:
+ *		unsigned long r7;
+ *		unsigned long b1;			// b1-b5
+ *			:
+ *		unsigned long b5;
+ *		unsigned long ar_ec;
+ *		unsigned long ar_lc;
+ *		unsigned long empty2[5];
+ *		unsigned long cr_ipsr;
+ *		unsigned long cr_iip;
+ *		unsigned long cfm;
+ *		unsigned long ar_unat;
+ *		unsigned long ar_pfs;
+ *		unsigned long ar_rsc;
+ *		unsigned long ar_rnat;
+ *		unsigned long ar_bspstore;
+ *		unsigned long pr;
+ *		unsigned long b6;
+ *		unsigned long ar_bsp;
+ *		unsigned long r1;
+ *		unsigned long r2;
+ *		unsigned long r3;
+ *		unsigned long r12;
+ *		unsigned long r13;
+ *		unsigned long r14;
+ *		unsigned long r15;
+ *		unsigned long r8;
+ *		unsigned long r9;
+ *		unsigned long r10;
+ *		unsigned long r11;
+ *		unsigned long r16;
+ *			:
+ *		unsigned long r31;
+ *		unsigned long ar_ccv;
+ *		unsigned long ar_fpsr;
+ *		unsigned long b0;
+ *		unsigned long b7;
+ *		unsigned long f6;
+ *		unsigned long f7;
+ *		unsigned long f8;
+ *		unsigned long f9;
+ *		unsigned long ar_csd;
+ *		unsigned long ar_ssd;
+ *		unsigned long rsvd1[710];
+ *		unsigned long dbr[8];
+ *		unsigned long rsvd2[504];
+ *		unsigned long ibr[8];
+ *		unsigned long rsvd3[504];
+ *		unsigned long pmd[4];
+ *	}
+ */
+
+/* fph: */
+#define PT_F32			0x0000
+#define PT_F33			0x0010
+#define PT_F34			0x0020
+#define PT_F35			0x0030
+#define PT_F36			0x0040
+#define PT_F37			0x0050
+#define PT_F38			0x0060
+#define PT_F39			0x0070
+#define PT_F40			0x0080
+#define PT_F41			0x0090
+#define PT_F42			0x00a0
+#define PT_F43			0x00b0
+#define PT_F44			0x00c0
+#define PT_F45			0x00d0
+#define PT_F46			0x00e0
+#define PT_F47			0x00f0
+#define PT_F48			0x0100
+#define PT_F49			0x0110
+#define PT_F50			0x0120
+#define PT_F51			0x0130
+#define PT_F52			0x0140
+#define PT_F53			0x0150
+#define PT_F54			0x0160
+#define PT_F55			0x0170
+#define PT_F56			0x0180
+#define PT_F57			0x0190
+#define PT_F58			0x01a0
+#define PT_F59			0x01b0
+#define PT_F60			0x01c0
+#define PT_F61			0x01d0
+#define PT_F62			0x01e0
+#define PT_F63			0x01f0
+#define PT_F64			0x0200
+#define PT_F65			0x0210
+#define PT_F66			0x0220
+#define PT_F67			0x0230
+#define PT_F68			0x0240
+#define PT_F69			0x0250
+#define PT_F70			0x0260
+#define PT_F71			0x0270
+#define PT_F72			0x0280
+#define PT_F73			0x0290
+#define PT_F74			0x02a0
+#define PT_F75			0x02b0
+#define PT_F76			0x02c0
+#define PT_F77			0x02d0
+#define PT_F78			0x02e0
+#define PT_F79			0x02f0
+#define PT_F80			0x0300
+#define PT_F81			0x0310
+#define PT_F82			0x0320
+#define PT_F83			0x0330
+#define PT_F84			0x0340
+#define PT_F85			0x0350
+#define PT_F86			0x0360
+#define PT_F87			0x0370
+#define PT_F88			0x0380
+#define PT_F89			0x0390
+#define PT_F90			0x03a0
+#define PT_F91			0x03b0
+#define PT_F92			0x03c0
+#define PT_F93			0x03d0
+#define PT_F94			0x03e0
+#define PT_F95			0x03f0
+#define PT_F96			0x0400
+#define PT_F97			0x0410
+#define PT_F98			0x0420
+#define PT_F99			0x0430
+#define PT_F100			0x0440
+#define PT_F101			0x0450
+#define PT_F102			0x0460
+#define PT_F103			0x0470
+#define PT_F104			0x0480
+#define PT_F105			0x0490
+#define PT_F106			0x04a0
+#define PT_F107			0x04b0
+#define PT_F108			0x04c0
+#define PT_F109			0x04d0
+#define PT_F110			0x04e0
+#define PT_F111			0x04f0
+#define PT_F112			0x0500
+#define PT_F113			0x0510
+#define PT_F114			0x0520
+#define PT_F115			0x0530
+#define PT_F116			0x0540
+#define PT_F117			0x0550
+#define PT_F118			0x0560
+#define PT_F119			0x0570
+#define PT_F120			0x0580
+#define PT_F121			0x0590
+#define PT_F122			0x05a0
+#define PT_F123			0x05b0
+#define PT_F124			0x05c0
+#define PT_F125			0x05d0
+#define PT_F126			0x05e0
+#define PT_F127			0x05f0
+
+#define PT_NAT_BITS		0x0600
+
+#define PT_F2			0x0610
+#define PT_F3			0x0620
+#define PT_F4			0x0630
+#define PT_F5			0x0640
+#define PT_F10			0x0650
+#define PT_F11			0x0660
+#define PT_F12			0x0670
+#define PT_F13			0x0680
+#define PT_F14			0x0690
+#define PT_F15			0x06a0
+#define PT_F16			0x06b0
+#define PT_F17			0x06c0
+#define PT_F18			0x06d0
+#define PT_F19			0x06e0
+#define PT_F20			0x06f0
+#define PT_F21			0x0700
+#define PT_F22			0x0710
+#define PT_F23			0x0720
+#define PT_F24			0x0730
+#define PT_F25			0x0740
+#define PT_F26			0x0750
+#define PT_F27			0x0760
+#define PT_F28			0x0770
+#define PT_F29			0x0780
+#define PT_F30			0x0790
+#define PT_F31			0x07a0
+#define PT_R4			0x07b0
+#define PT_R5			0x07b8
+#define PT_R6			0x07c0
+#define PT_R7			0x07c8
+
+#define PT_B1			0x07d8
+#define PT_B2			0x07e0
+#define PT_B3			0x07e8
+#define PT_B4			0x07f0
+#define PT_B5			0x07f8
+
+#define PT_AR_EC		0x0800
+#define PT_AR_LC		0x0808
+
+#define PT_CR_IPSR		0x0830
+#define PT_CR_IIP		0x0838
+#define PT_CFM			0x0840
+#define PT_AR_UNAT		0x0848
+#define PT_AR_PFS		0x0850
+#define PT_AR_RSC		0x0858
+#define PT_AR_RNAT		0x0860
+#define PT_AR_BSPSTORE		0x0868
+#define PT_PR			0x0870
+#define PT_B6			0x0878
+#define PT_AR_BSP		0x0880	/* note: this points to the *end* of the backing store! */
+#define PT_R1			0x0888
+#define PT_R2			0x0890
+#define PT_R3			0x0898
+#define PT_R12			0x08a0
+#define PT_R13			0x08a8
+#define PT_R14			0x08b0
+#define PT_R15			0x08b8
+#define PT_R8			0x08c0
+#define PT_R9			0x08c8
+#define PT_R10			0x08d0
+#define PT_R11			0x08d8
+#define PT_R16			0x08e0
+#define PT_R17			0x08e8
+#define PT_R18			0x08f0
+#define PT_R19			0x08f8
+#define PT_R20			0x0900
+#define PT_R21			0x0908
+#define PT_R22			0x0910
+#define PT_R23			0x0918
+#define PT_R24			0x0920
+#define PT_R25			0x0928
+#define PT_R26			0x0930
+#define PT_R27			0x0938
+#define PT_R28			0x0940
+#define PT_R29			0x0948
+#define PT_R30			0x0950
+#define PT_R31			0x0958
+#define PT_AR_CCV		0x0960
+#define PT_AR_FPSR		0x0968
+#define PT_B0			0x0970
+#define PT_B7			0x0978
+#define PT_F6			0x0980
+#define PT_F7			0x0990
+#define PT_F8			0x09a0
+#define PT_F9			0x09b0
+#define PT_AR_CSD		0x09c0
+#define PT_AR_SSD		0x09c8
+
+#define PT_DBR			0x2000	/* data breakpoint registers */
+#define PT_IBR			0x3000	/* instruction breakpoint registers */
+#define PT_PMD			0x4000	/* performance monitoring counters */
+
+#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
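
These offsets index the virtual "uarea" described at the top of the file; a hypothetical tracer reads individual slots with PTRACE_PEEKUSER (a sketch only, error handling abbreviated):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace_offsets.h>

	/* Print the stopped tracee's instruction pointer.  PEEKUSER returns
	 * the word itself, so errno is the only way to spot a failure. */
	static void show_iip(pid_t pid)
	{
		errno = 0;
		long iip = ptrace(PTRACE_PEEKUSER, pid, (void *) PT_CR_IIP, 0);
		if (errno == 0)
			printf("cr.iip = 0x%lx\n", iip);
	}
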
diff --git a/arch/ia64/include/uapi/asm/resource.h b/arch/ia64/include/uapi/asm/resource.h
new file mode 100644
index 0000000..ba2272a
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/resource.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_IA64_RESOURCE_H
+#define _ASM_IA64_RESOURCE_H
+
+#include <asm/ustack.h>
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/arch/ia64/include/uapi/asm/rse.h b/arch/ia64/include/uapi/asm/rse.h
new file mode 100644
index 0000000..02830a3
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/rse.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_RSE_H
+#define _ASM_IA64_RSE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Register stack engine related helper functions.  This file may be
+ * used in applications, so be careful about the name-space and give
+ * some consideration to non-GNU C compilers (though __inline__ is
+ * fine).
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long *addr)
+{
+	return (((unsigned long) addr) >> 3) & 0x3f;
+}
+
+/*
+ * Return TRUE if ADDR is the address of an RNAT slot.
+ */
+static __inline__ unsigned long
+ia64_rse_is_rnat_slot (unsigned long *addr)
+{
+	return ia64_rse_slot_num(addr) == 0x3f;
+}
+
+/*
+ * Returns the address of the RNAT slot that covers the slot at
+ * address SLOT_ADDR.
+ */
+static __inline__ unsigned long *
+ia64_rse_rnat_addr (unsigned long *slot_addr)
+{
+	return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
+}
+
+/*
+ * Calculate the number of registers in the dirty partition starting at BSPSTORE and
+ * ending at BSP.  This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
+ * ar.rnat.
+ */
+static __inline__ unsigned long
+ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
+{
+	unsigned long slots = (bsp - bspstore);
+
+	return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
+}
+
+/*
+ * The inverse of the above: given bspstore and the number of
+ * registers, calculate ar.bsp.
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+	long delta = ia64_rse_slot_num(addr) + num_regs;
+
+	if (num_regs < 0)
+		delta -= 0x3e;
+	return addr + num_regs + delta/0x3f;
+}
+
+#endif /* _ASM_IA64_RSE_H */
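
A worked example of the RNAT correction in ia64_rse_num_regs (the addresses are invented for illustration, but the arithmetic is exactly what the function computes): if bspstore sits at slot 0 and bsp is 128 slots higher, the range crosses the RNAT slots at slot numbers 63 and 127, so only 126 of the 128 slots hold actual registers.

	unsigned long *bspstore = (unsigned long *) 0x6000000000000000;	/* slot 0 */
	unsigned long *bsp      = bspstore + 128;			/* 128 slots */
	unsigned long nregs     = ia64_rse_num_regs(bspstore, bsp);	/* 126 */
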
diff --git a/arch/ia64/include/uapi/asm/sembuf.h b/arch/ia64/include/uapi/asm/sembuf.h
new file mode 100644
index 0000000..1340fbc
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/sembuf.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_SEMBUF_H
+#define _ASM_IA64_SEMBUF_H
+
+/*
+ * The semid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+};
+
+#endif /* _ASM_IA64_SEMBUF_H */
diff --git a/arch/ia64/include/uapi/asm/setup.h b/arch/ia64/include/uapi/asm/setup.h
new file mode 100644
index 0000000..8d56458
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/setup.h
@@ -0,0 +1,24 @@
+#ifndef __IA64_SETUP_H
+#define __IA64_SETUP_H
+
+#define COMMAND_LINE_SIZE	2048
+
+extern struct ia64_boot_param {
+	__u64 command_line;		/* physical address of command line arguments */
+	__u64 efi_systab;		/* physical address of EFI system table */
+	__u64 efi_memmap;		/* physical address of EFI memory map */
+	__u64 efi_memmap_size;		/* size of EFI memory map */
+	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
+	__u32 efi_memdesc_version;	/* memory descriptor version */
+	struct {
+		__u16 num_cols;	/* number of columns on console output device */
+		__u16 num_rows;	/* number of rows on console output device */
+		__u16 orig_x;	/* cursor's x position */
+		__u16 orig_y;	/* cursor's y position */
+	} console_info;
+	__u64 fpswa;		/* physical address of the fpswa interface */
+	__u64 initrd_start;
+	__u64 initrd_size;
+} *ia64_boot_param;
+
+#endif
diff --git a/arch/ia64/include/uapi/asm/shmbuf.h b/arch/ia64/include/uapi/asm/shmbuf.h
new file mode 100644
index 0000000..585002a
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/shmbuf.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_SHMBUF_H
+#define _ASM_IA64_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ASM_IA64_SHMBUF_H */
diff --git a/arch/ia64/include/uapi/asm/sigcontext.h b/arch/ia64/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000..57ff777
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/sigcontext.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_SIGCONTEXT_H
+#define _ASM_IA64_SIGCONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/fpu.h>
+
+#define IA64_SC_FLAG_ONSTACK_BIT		0	/* is handler running on signal stack? */
+#define IA64_SC_FLAG_IN_SYSCALL_BIT		1	/* did signal interrupt a syscall? */
+#define IA64_SC_FLAG_FPH_VALID_BIT		2	/* is state in f[32]-f[127] valid? */
+
+#define IA64_SC_FLAG_ONSTACK		(1 << IA64_SC_FLAG_ONSTACK_BIT)
+#define IA64_SC_FLAG_IN_SYSCALL		(1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
+#define IA64_SC_FLAG_FPH_VALID		(1 << IA64_SC_FLAG_FPH_VALID_BIT)
+
+# ifndef __ASSEMBLY__
+
+/*
+ * Note on handling of register backing store: sc_ar_bsp contains the address that would
+ * be found in ar.bsp after executing a "cover" instruction in the context in which the
+ * signal was raised.  If signal delivery required switching to an alternate signal stack
+ * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
+ * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
+ * original one.  In this case, sc_rbs_base contains the base address of the new register
+ * backing store.  The number of registers in the dirty partition can be calculated as:
+ *
+ *   ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
+ *
+ */
+
+struct sigcontext {
+	unsigned long		sc_flags;	/* see manifest constants above */
+	unsigned long		sc_nat;		/* bit i == 1 iff scratch reg gr[i] is a NaT */
+	stack_t			sc_stack;	/* previously active stack */
+
+	unsigned long		sc_ip;		/* instruction pointer */
+	unsigned long		sc_cfm;		/* current frame marker */
+	unsigned long		sc_um;		/* user mask bits */
+	unsigned long		sc_ar_rsc;	/* register stack configuration register */
+	unsigned long		sc_ar_bsp;	/* backing store pointer */
+	unsigned long		sc_ar_rnat;	/* RSE NaT collection register */
+	unsigned long		sc_ar_ccv;	/* compare and exchange compare value register */
+	unsigned long		sc_ar_unat;	/* ar.unat of interrupted context */
+	unsigned long		sc_ar_fpsr;	/* floating-point status register */
+	unsigned long		sc_ar_pfs;	/* previous function state */
+	unsigned long		sc_ar_lc;	/* loop count register */
+	unsigned long		sc_pr;		/* predicate registers */
+	unsigned long		sc_br[8];	/* branch registers */
+	/* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
+	unsigned long		sc_gr[32];	/* general registers (static partition) */
+	struct ia64_fpreg	sc_fr[128];	/* floating-point registers */
+
+	unsigned long		sc_rbs_base;	/* NULL or new base of sighandler's rbs */
+	unsigned long		sc_loadrs;	/* see description above */
+
+	unsigned long		sc_ar25;	/* cmp8xchg16 uses this */
+	unsigned long		sc_ar26;	/* rsvd for scratch use */
+	unsigned long		sc_rsvd[12];	/* reserved for future use */
+	/*
+	 * The mask must come last so we can increase _NSIG_WORDS
+	 * without breaking binary compatibility.
+	 */
+	sigset_t		sc_mask;	/* signal mask to restore after handler returns */
+};
+
+# endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGCONTEXT_H */
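
Putting the ndirty formula above into code, an SA_SIGINFO handler might compute it like this (a sketch under the stated assumptions, using struct ucontext as defined in this tree's <asm/ucontext.h>; the casts are needed because sc_rbs_base and sc_loadrs are plain unsigned long):

	#include <signal.h>
	#include <asm/rse.h>
	#include <asm/ucontext.h>

	static void handler(int sig, siginfo_t *si, void *arg)
	{
		struct sigcontext *sc = &((struct ucontext *) arg)->uc_mcontext;
		unsigned long ndirty = 0;

		if (sc->sc_rbs_base)	/* dirty partition lives on the alternate stack */
			ndirty = ia64_rse_num_regs(
				(unsigned long *) sc->sc_rbs_base,
				(unsigned long *) (sc->sc_rbs_base + (sc->sc_loadrs >> 16)));
		(void) ndirty;
	}
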
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
new file mode 100644
index 0000000..bce9bc1
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -0,0 +1,125 @@
+/*
+ * Based on <asm-i386/siginfo.h>.
+ *
+ * Modified 1998-2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_SIGINFO_H
+#define _UAPI_ASM_IA64_SIGINFO_H
+
+
+#define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+
+#define HAVE_ARCH_SIGINFO_T
+#define HAVE_ARCH_COPY_SIGINFO
+#define HAVE_ARCH_COPY_SIGINFO_TO_USER
+
+#include <asm-generic/siginfo.h>
+
+typedef struct siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+	int __pad0;
+
+	union {
+		int _pad[SI_PAD_SIZE];
+
+		/* kill() */
+		struct {
+			pid_t _pid;		/* sender's pid */
+			uid_t _uid;		/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;		/* timer id */
+			int _overrun;		/* overrun count */
+			char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
+			sigval_t _sigval;	/* must overlay ._rt._sigval! */
+			int _sys_private;	/* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			pid_t _pid;		/* sender's pid */
+			uid_t _uid;		/* sender's uid */
+			sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			pid_t _pid;		/* which child */
+			uid_t _uid;		/* sender's uid */
+			int _status;		/* exit code */
+			clock_t _utime;
+			clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			void __user *_addr;	/* faulting insn/memory ref. */
+			int _imm;		/* immediate value for "break" */
+			unsigned int _flags;	/* see below */
+			unsigned long _isr;	/* isr */
+			short _addr_lsb;	/* lsb of faulting address */
+			struct {
+				void __user *_lower;
+				void __user *_upper;
+			} _addr_bnd;
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			long _band;	/* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
+			int _fd;
+		} _sigpoll;
+	} _sifields;
+} siginfo_t;
+
+#define si_imm		_sifields._sigfault._imm	/* as per UNIX SysV ABI spec */
+#define si_flags	_sifields._sigfault._flags
+/*
+ * si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that
+ * si_code is non-zero and __ISR_VALID is set in si_flags.
+ */
+#define si_isr		_sifields._sigfault._isr
+
+/*
+ * Flag values for si_flags:
+ */
+#define __ISR_VALID_BIT	0
+#define __ISR_VALID	(1 << __ISR_VALID_BIT)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_BADIADDR	(__SI_FAULT|9)	/* unimplemented instruction address */
+#define __ILL_BREAK	(__SI_FAULT|10)	/* illegal break */
+#define __ILL_BNDMOD	(__SI_FAULT|11)	/* bundle-update (modification) in progress */
+#undef NSIGILL
+#define NSIGILL		11
+
+/*
+ * SIGFPE si_codes
+ */
+#define __FPE_DECOVF	(__SI_FAULT|9)	/* decimal overflow */
+#define __FPE_DECDIV	(__SI_FAULT|10)	/* decimal division by zero */
+#define __FPE_DECERR	(__SI_FAULT|11)	/* packed decimal error */
+#define __FPE_INVASC	(__SI_FAULT|12)	/* invalid ASCII digit */
+#define __FPE_INVDEC	(__SI_FAULT|13)	/* invalid decimal digit */
+#undef NSIGFPE
+#define NSIGFPE		13
+
+/*
+ * SIGSEGV si_codes
+ */
+#define __SEGV_PSTKOVF	(__SI_FAULT|4)	/* paragraph stack overflow */
+#undef NSIGSEGV
+#define NSIGSEGV	4
+
+#undef NSIGTRAP
+#define NSIGTRAP	4
+
+
+#endif /* _UAPI_ASM_IA64_SIGINFO_H */
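
The si_isr rule above translates to a two-part test; a minimal handler sketch:

	#include <signal.h>
	#include <stdio.h>

	static void fault_handler(int sig, siginfo_t *si, void *uc)
	{
		/* si_isr is meaningful only if si_code != 0 and __ISR_VALID is set */
		if (si->si_code != 0 && (si->si_flags & __ISR_VALID))
			fprintf(stderr, "isr = 0x%lx\n", si->si_isr);
	}
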
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
new file mode 100644
index 0000000..c0ea285
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -0,0 +1,121 @@
+/*
+ * Modified 1998-2001, 2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x.  Hence the #ifdef __KERNEL__ ugliness.
+ */
+#ifndef _UAPI_ASM_IA64_SIGNAL_H
+#define _UAPI_ASM_IA64_SIGNAL_H
+
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGBUS		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGUSR1		10
+#define SIGSEGV		11
+#define SIGUSR2		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGSTKFLT	16
+#define SIGCHLD		17
+#define SIGCONT		18
+#define SIGSTOP		19
+#define SIGTSTP		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGURG		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGIO		29
+#define SIGPOLL		SIGIO
+/*
+#define SIGLOST		29
+*/
+#define SIGPWR		30
+#define SIGSYS		31
+/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
+#define	SIGUNUSED	31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP	0x00000001
+#define SA_NOCLDWAIT	0x00000002
+#define SA_SIGINFO	0x00000004
+#define SA_ONSTACK	0x08000000
+#define SA_RESTART	0x10000000
+#define SA_NODEFER	0x40000000
+#define SA_RESETHAND	0x80000000
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+
+#define SA_RESTORER	0x04000000
+
+/*
+ * The minimum stack size needs to be fairly large because we want to
+ * be sure that an app compiled for today's CPUs will continue to run
+ * on all future CPU models.  The CPU model matters because the signal
+ * frame needs to have space for the complete machine state, including
+ * all physical stacked registers.  The number of physical stacked
+ * registers is CPU model dependent, but given that the width of
+ * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
+ * more than 16KB of space.
+ */
+#if 1
+  /*
+   * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
+   * in wrong. ;-(  To preserve backwards compatibility, we leave the kernel at the
+   * incorrect value and fix libc only.
+   */
+# define MINSIGSTKSZ	131027	/* min. stack size for sigaltstack() */
+#else
+# define MINSIGSTKSZ	131072	/* min. stack size for sigaltstack() */
+#endif
+#define SIGSTKSZ	262144	/* default stack size for sigaltstack() */
+
+
+#include <asm-generic/signal-defs.h>
+
+# ifndef __ASSEMBLY__
+
+#  include <linux/types.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _UAPI_ASM_IA64_SIGNAL_H */
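
Since the signal frame must hold the full machine state, handlers that run on their own stack should use these constants when sizing it; a minimal sigaltstack() sketch:

	#include <signal.h>
	#include <stdlib.h>

	static void setup_signal_stack(void)
	{
		stack_t ss = {
			.ss_sp    = malloc(SIGSTKSZ),	/* 256KB on ia64 */
			.ss_size  = SIGSTKSZ,
			.ss_flags = 0,
		};
		sigaltstack(&ss, NULL);		/* pair with SA_ONSTACK in sigaction() */
	}
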
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
new file mode 100644
index 0000000..59be3d8
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_SOCKET_H
+#define _ASM_IA64_SOCKET_H
+
+/*
+ * Socket related defines.
+ *
+ * Based on <asm-i386/socket.h>.
+ *
+ * Modified 1998-2000
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET	1
+
+#define SO_DEBUG	1
+#define SO_REUSEADDR	2
+#define SO_TYPE		3
+#define SO_ERROR	4
+#define SO_DONTROUTE	5
+#define SO_BROADCAST	6
+#define SO_SNDBUF	7
+#define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
+#define SO_KEEPALIVE	9
+#define SO_OOBINLINE	10
+#define SO_NO_CHECK	11
+#define SO_PRIORITY	12
+#define SO_LINGER	13
+#define SO_BSDCOMPAT	14
+#define SO_REUSEPORT	15
+#define SO_PASSCRED	16
+#define SO_PEERCRED	17
+#define SO_RCVLOWAT	18
+#define SO_SNDLOWAT	19
+#define SO_RCVTIMEO	20
+#define SO_SNDTIMEO	21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
+#define SO_SECURITY_ENCRYPTION_NETWORK		24
+
+#define SO_BINDTODEVICE		25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER	26
+#define SO_DETACH_FILTER	27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
+
+#define SO_PEERNAME		28
+#define SO_TIMESTAMP		29
+#define SCM_TIMESTAMP		SO_TIMESTAMP
+
+#define SO_ACCEPTCONN		30
+
+#define SO_PEERSEC		31
+#define SO_PASSSEC		34
+#define SO_TIMESTAMPNS		35
+#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS
+
+#define SO_MARK			36
+
+#define SO_TIMESTAMPING		37
+#define SCM_TIMESTAMPING	SO_TIMESTAMPING
+
+#define SO_PROTOCOL		38
+#define SO_DOMAIN		39
+
+#define SO_RXQ_OVFL             40
+
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+#define SO_PEEK_OFF		42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS		43
+
+#define SO_LOCK_FILTER		44
+
+#define SO_SELECT_ERR_QUEUE	45
+
+#define SO_BUSY_POLL		46
+
+#define SO_MAX_PACING_RATE	47
+
+#define SO_BPF_EXTENSIONS	48
+
+#define SO_INCOMING_CPU		49
+
+#define SO_ATTACH_BPF		50
+#define SO_DETACH_BPF		SO_DETACH_FILTER
+
+#endif /* _ASM_IA64_SOCKET_H */
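
These constants are the SOL_SOCKET-level option names consumed by setsockopt(2) and getsockopt(2); for example:

	#include <sys/socket.h>

	static int enable_reuseaddr(int fd)
	{
		int one = 1;	/* SOL_SOCKET is 1 and SO_REUSEADDR is 2 on ia64 */
		return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	}
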
diff --git a/arch/ia64/include/uapi/asm/sockios.h b/arch/ia64/include/uapi/asm/sockios.h
new file mode 100644
index 0000000..15c9246
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/sockios.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_SOCKIOS_H
+#define _ASM_IA64_SOCKIOS_H
+
+/*
+ * Socket-level I/O control calls.
+ *
+ * Based on <asm-i386/sockios.h>.
+ *
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#define FIOSETOWN 	0x8901
+#define SIOCSPGRP	0x8902
+#define FIOGETOWN	0x8903
+#define SIOCGPGRP	0x8904
+#define SIOCATMARK	0x8905
+#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */
+#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */
+
+#endif /* _ASM_IA64_SOCKIOS_H */
diff --git a/arch/ia64/include/uapi/asm/stat.h b/arch/ia64/include/uapi/asm/stat.h
new file mode 100644
index 0000000..367bb90
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/stat.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_STAT_H
+#define _ASM_IA64_STAT_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+struct stat {
+	unsigned long	st_dev;
+	unsigned long	st_ino;
+	unsigned long	st_nlink;
+	unsigned int	st_mode;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned int	__pad0;
+	unsigned long	st_rdev;
+	unsigned long	st_size;
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;
+	unsigned long	st_blksize;
+	long		st_blocks;
+	unsigned long	__unused[3];
+};
+
+#define STAT_HAVE_NSEC 1
+
+struct ia64_oldstat {
+	unsigned int	st_dev;
+	unsigned int	st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned int	st_rdev;
+	unsigned int	__pad1;
+	unsigned long	st_size;
+	unsigned long	st_atime;
+	unsigned long	st_mtime;
+	unsigned long	st_ctime;
+	unsigned int	st_blksize;
+	int		st_blocks;
+	unsigned int	__unused1;
+	unsigned int	__unused2;
+};
+
+#endif /* _ASM_IA64_STAT_H */
diff --git a/arch/ia64/include/uapi/asm/statfs.h b/arch/ia64/include/uapi/asm/statfs.h
new file mode 100644
index 0000000..1e58966
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/statfs.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_STATFS_H
+#define _ASM_IA64_STATFS_H
+
+/*
+ * Based on <asm-i386/statfs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * We need compat_statfs64 to be packed, because the i386 ABI won't
+ * add padding at the end to bring it to a multiple of 8 bytes, but
+ * the IA64 ABI will.
+ */
+#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
+
+#include <asm-generic/statfs.h>
+
+#endif /* _ASM_IA64_STATFS_H */
diff --git a/arch/ia64/include/uapi/asm/swab.h b/arch/ia64/include/uapi/asm/swab.h
new file mode 100644
index 0000000..c89a8cb
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/swab.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_IA64_SWAB_H
+#define _ASM_IA64_SWAB_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+	__u64 result;
+
+	result = ia64_mux1(x, ia64_mux1_rev);
+	return result;
+}
+#define __arch_swab64 __arch_swab64
+
+static __inline__ __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	return __arch_swab64(x) >> 32;
+}
+#define __arch_swab32 __arch_swab32
+
+static __inline__ __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	return __arch_swab64(x) >> 48;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _ASM_IA64_SWAB_H */
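
All three byte-swap routines rely on the same 64-bit reversal: ia64_mux1 with the reverse permutation flips all eight bytes, so the 32- and 16-bit variants just shift the interesting bytes back down. Illustrative values:

	__arch_swab64(0x0102030405060708) == 0x0807060504030201
	__arch_swab32(0x12345678)         == 0x78563412	/* top half of the 64-bit reversal */
	__arch_swab16(0x1234)             == 0x3412	/* top 16 bits of the reversal */
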
diff --git a/arch/ia64/include/uapi/asm/termbits.h b/arch/ia64/include/uapi/asm/termbits.h
new file mode 100644
index 0000000..c009b94
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/termbits.h
@@ -0,0 +1,208 @@
+#ifndef _ASM_IA64_TERMBITS_H
+#define _ASM_IA64_TERMBITS_H
+
+/*
+ * Based on <asm-i386/termbits.h>.
+ *
+ * Modified 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28	Added new baudrates
+ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+};
+
+struct termios2 {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+struct ktermios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IUCLC	0001000
+#define IXON	0002000
+#define IXANY	0004000
+#define IXOFF	0010000
+#define IMAXBEL	0020000
+#define IUTF8	0040000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define OLCUC	0000002
+#define ONLCR	0000004
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+#define OFILL	0000100
+#define OFDEL	0000200
+#define NLDLY	0000400
+#define   NL0	0000000
+#define   NL1	0000400
+#define CRDLY	0003000
+#define   CR0	0000000
+#define   CR1	0001000
+#define   CR2	0002000
+#define   CR3	0003000
+#define TABDLY	0014000
+#define   TAB0	0000000
+#define   TAB1	0004000
+#define   TAB2	0010000
+#define   TAB3	0014000
+#define   XTABS	0014000
+#define BSDLY	0020000
+#define   BS0	0000000
+#define   BS1	0020000
+#define VTDLY	0040000
+#define   VT0	0000000
+#define   VT1	0040000
+#define FFDLY	0100000
+#define   FF0	0000000
+#define   FF1	0100000
+
+/* c_cflag bit meaning */
+#define CBAUD	0010017
+#define  B0	0000000		/* hang up */
+#define  B50	0000001
+#define  B75	0000002
+#define  B110	0000003
+#define  B134	0000004
+#define  B150	0000005
+#define  B200	0000006
+#define  B300	0000007
+#define  B600	0000010
+#define  B1200	0000011
+#define  B1800	0000012
+#define  B2400	0000013
+#define  B4800	0000014
+#define  B9600	0000015
+#define  B19200	0000016
+#define  B38400	0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE	0000060
+#define   CS5	0000000
+#define   CS6	0000020
+#define   CS7	0000040
+#define   CS8	0000060
+#define CSTOPB	0000100
+#define CREAD	0000200
+#define PARENB	0000400
+#define PARODD	0001000
+#define HUPCL	0002000
+#define CLOCAL	0004000
+#define CBAUDEX 0010000
+#define    BOTHER 0010000
+#define    B57600 0010001
+#define   B115200 0010002
+#define   B230400 0010003
+#define   B460800 0010004
+#define   B500000 0010005
+#define   B576000 0010006
+#define   B921600 0010007
+#define  B1000000 0010010
+#define  B1152000 0010011
+#define  B1500000 0010012
+#define  B2000000 0010013
+#define  B2500000 0010014
+#define  B3000000 0010015
+#define  B3500000 0010016
+#define  B4000000 0010017
+#define CIBAUD	  002003600000		/* input baud rate */
+#define CMSPAR	  010000000000		/* mark or space (stick) parity */
+#define CRTSCTS	  020000000000		/* flow control */
+
+#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG	0000001
+#define ICANON	0000002
+#define XCASE	0000004
+#define ECHO	0000010
+#define ECHOE	0000020
+#define ECHOK	0000040
+#define ECHONL	0000100
+#define NOFLSH	0000200
+#define TOSTOP	0000400
+#define ECHOCTL	0001000
+#define ECHOPRT	0002000
+#define ECHOKE	0004000
+#define FLUSHO	0010000
+#define PENDIN	0040000
+#define IEXTEN	0100000
+#define EXTPROC	0200000
+
+/* tcflow() and TCXONC use these */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* tcflush() and TCFLSH use these */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* tcsetattr uses these */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
+
+#endif /* _ASM_IA64_TERMBITS_H */
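
BOTHER together with the c_ispeed/c_ospeed fields of struct termios2 allows baud rates outside the Bxxx table; a user-space sketch (this assumes the TCGETS2/TCSETS2 ioctl numbers from <asm/ioctls.h>):

	#include <sys/ioctl.h>
	#include <asm/ioctls.h>
	#include <asm/termbits.h>

	static int set_custom_baud(int fd, speed_t rate)
	{
		struct termios2 tio;

		if (ioctl(fd, TCGETS2, &tio) < 0)
			return -1;
		tio.c_cflag &= ~CBAUD;
		tio.c_cflag |= BOTHER;		/* speed comes from c_ispeed/c_ospeed */
		tio.c_ispeed = tio.c_ospeed = rate;
		return ioctl(fd, TCSETS2, &tio);
	}
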
diff --git a/arch/ia64/include/uapi/asm/termios.h b/arch/ia64/include/uapi/asm/termios.h
new file mode 100644
index 0000000..d59b48c
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/termios.h
@@ -0,0 +1,50 @@
+/*
+ * Modified 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28	Added N_IRDA and N_SMSBLOCK
+ */
+#ifndef _UAPI_ASM_IA64_TERMIOS_H
+#define _UAPI_ASM_IA64_TERMIOS_H
+
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) */
+
+
+#endif /* _UAPI_ASM_IA64_TERMIOS_H */
diff --git a/arch/ia64/include/uapi/asm/types.h b/arch/ia64/include/uapi/asm/types.h
new file mode 100644
index 0000000..321193b
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/types.h
@@ -0,0 +1,31 @@
+/*
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid naming clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_TYPES_H
+#define _UAPI_ASM_IA64_TYPES_H
+
+
+#ifndef __KERNEL__
+#include <asm-generic/int-l64.h>
+#endif
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x)		(x)
+# define __IA64_UL_CONST(x)	x
+
+#else
+# define __IA64_UL(x)		((unsigned long)(x))
+# define __IA64_UL_CONST(x)	x##UL
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_IA64_TYPES_H */
diff --git a/arch/ia64/include/uapi/asm/ucontext.h b/arch/ia64/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..bf573dc
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_UCONTEXT_H
+#define _ASM_IA64_UCONTEXT_H
+
+struct ucontext {
+	struct sigcontext uc_mcontext;
+};
+
+#define uc_link		uc_mcontext.sc_gr[0]	/* wrong type; nobody cares */
+#define uc_sigmask	uc_mcontext.sc_sigmask
+#define uc_stack	uc_mcontext.sc_stack
+
+#endif /* _ASM_IA64_UCONTEXT_H */
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
new file mode 100644
index 0000000..762edce
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -0,0 +1,340 @@
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _UAPI_ASM_IA64_UNISTD_H
+#define _UAPI_ASM_IA64_UNISTD_H
+
+
+#include <asm/break.h>
+
+#define __BREAK_SYSCALL			__IA64_BREAK_SYSCALL
+
+#define __NR_ni_syscall			1024
+#define __NR_exit			1025
+#define __NR_read			1026
+#define __NR_write			1027
+#define __NR_open			1028
+#define __NR_close			1029
+#define __NR_creat			1030
+#define __NR_link			1031
+#define __NR_unlink			1032
+#define __NR_execve			1033
+#define __NR_chdir			1034
+#define __NR_fchdir			1035
+#define __NR_utimes			1036
+#define __NR_mknod			1037
+#define __NR_chmod			1038
+#define __NR_chown			1039
+#define __NR_lseek			1040
+#define __NR_getpid			1041
+#define __NR_getppid			1042
+#define __NR_mount			1043
+#define __NR_umount			1044
+#define __NR_setuid			1045
+#define __NR_getuid			1046
+#define __NR_geteuid			1047
+#define __NR_ptrace			1048
+#define __NR_access			1049
+#define __NR_sync			1050
+#define __NR_fsync			1051
+#define __NR_fdatasync			1052
+#define __NR_kill			1053
+#define __NR_rename			1054
+#define __NR_mkdir			1055
+#define __NR_rmdir			1056
+#define __NR_dup			1057
+#define __NR_pipe			1058
+#define __NR_times			1059
+#define __NR_brk			1060
+#define __NR_setgid			1061
+#define __NR_getgid			1062
+#define __NR_getegid			1063
+#define __NR_acct			1064
+#define __NR_ioctl			1065
+#define __NR_fcntl			1066
+#define __NR_umask			1067
+#define __NR_chroot			1068
+#define __NR_ustat			1069
+#define __NR_dup2			1070
+#define __NR_setreuid			1071
+#define __NR_setregid			1072
+#define __NR_getresuid			1073
+#define __NR_setresuid			1074
+#define __NR_getresgid			1075
+#define __NR_setresgid			1076
+#define __NR_getgroups			1077
+#define __NR_setgroups			1078
+#define __NR_getpgid			1079
+#define __NR_setpgid			1080
+#define __NR_setsid			1081
+#define __NR_getsid			1082
+#define __NR_sethostname		1083
+#define __NR_setrlimit			1084
+#define __NR_getrlimit			1085
+#define __NR_getrusage			1086
+#define __NR_gettimeofday		1087
+#define __NR_settimeofday		1088
+#define __NR_select			1089
+#define __NR_poll			1090
+#define __NR_symlink			1091
+#define __NR_readlink			1092
+#define __NR_uselib			1093
+#define __NR_swapon			1094
+#define __NR_swapoff			1095
+#define __NR_reboot			1096
+#define __NR_truncate			1097
+#define __NR_ftruncate			1098
+#define __NR_fchmod			1099
+#define __NR_fchown			1100
+#define __NR_getpriority		1101
+#define __NR_setpriority		1102
+#define __NR_statfs			1103
+#define __NR_fstatfs			1104
+#define __NR_gettid			1105
+#define __NR_semget			1106
+#define __NR_semop			1107
+#define __NR_semctl			1108
+#define __NR_msgget			1109
+#define __NR_msgsnd			1110
+#define __NR_msgrcv			1111
+#define __NR_msgctl			1112
+#define __NR_shmget			1113
+#define __NR_shmat			1114
+#define __NR_shmdt			1115
+#define __NR_shmctl			1116
+/* also known as klogctl() in GNU libc: */
+#define __NR_syslog			1117
+#define __NR_setitimer			1118
+#define __NR_getitimer			1119
+/* 1120 was __NR_old_stat */
+/* 1121 was __NR_old_lstat */
+/* 1122 was __NR_old_fstat */
+#define __NR_vhangup			1123
+#define __NR_lchown			1124
+#define __NR_remap_file_pages		1125
+#define __NR_wait4			1126
+#define __NR_sysinfo			1127
+#define __NR_clone			1128
+#define __NR_setdomainname		1129
+#define __NR_uname			1130
+#define __NR_adjtimex			1131
+/* 1132 was __NR_create_module */
+#define __NR_init_module		1133
+#define __NR_delete_module		1134
+/* 1135 was __NR_get_kernel_syms */
+/* 1136 was __NR_query_module */
+#define __NR_quotactl			1137
+#define __NR_bdflush			1138
+#define __NR_sysfs			1139
+#define __NR_personality		1140
+#define __NR_afs_syscall		1141
+#define __NR_setfsuid			1142
+#define __NR_setfsgid			1143
+#define __NR_getdents			1144
+#define __NR_flock			1145
+#define __NR_readv			1146
+#define __NR_writev			1147
+#define __NR_pread64			1148
+#define __NR_pwrite64			1149
+#define __NR__sysctl			1150
+#define __NR_mmap			1151
+#define __NR_munmap			1152
+#define __NR_mlock			1153
+#define __NR_mlockall			1154
+#define __NR_mprotect			1155
+#define __NR_mremap			1156
+#define __NR_msync			1157
+#define __NR_munlock			1158
+#define __NR_munlockall			1159
+#define __NR_sched_getparam		1160
+#define __NR_sched_setparam		1161
+#define __NR_sched_getscheduler		1162
+#define __NR_sched_setscheduler		1163
+#define __NR_sched_yield		1164
+#define __NR_sched_get_priority_max	1165
+#define __NR_sched_get_priority_min	1166
+#define __NR_sched_rr_get_interval	1167
+#define __NR_nanosleep			1168
+#define __NR_nfsservctl			1169
+#define __NR_prctl			1170
+/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
+#define __NR_mmap2			1172
+#define __NR_pciconfig_read		1173
+#define __NR_pciconfig_write		1174
+#define __NR_perfmonctl			1175
+#define __NR_sigaltstack		1176
+#define __NR_rt_sigaction		1177
+#define __NR_rt_sigpending		1178
+#define __NR_rt_sigprocmask		1179
+#define __NR_rt_sigqueueinfo		1180
+#define __NR_rt_sigreturn		1181
+#define __NR_rt_sigsuspend		1182
+#define __NR_rt_sigtimedwait		1183
+#define __NR_getcwd			1184
+#define __NR_capget			1185
+#define __NR_capset			1186
+#define __NR_sendfile			1187
+#define __NR_getpmsg			1188
+#define __NR_putpmsg			1189
+#define __NR_socket			1190
+#define __NR_bind			1191
+#define __NR_connect			1192
+#define __NR_listen			1193
+#define __NR_accept			1194
+#define __NR_getsockname		1195
+#define __NR_getpeername		1196
+#define __NR_socketpair			1197
+#define __NR_send			1198
+#define __NR_sendto			1199
+#define __NR_recv			1200
+#define __NR_recvfrom			1201
+#define __NR_shutdown			1202
+#define __NR_setsockopt			1203
+#define __NR_getsockopt			1204
+#define __NR_sendmsg			1205
+#define __NR_recvmsg			1206
+#define __NR_pivot_root			1207
+#define __NR_mincore			1208
+#define __NR_madvise			1209
+#define __NR_stat			1210
+#define __NR_lstat			1211
+#define __NR_fstat			1212
+#define __NR_clone2			1213
+#define __NR_getdents64			1214
+#define __NR_getunwind			1215
+#define __NR_readahead			1216
+#define __NR_setxattr			1217
+#define __NR_lsetxattr			1218
+#define __NR_fsetxattr			1219
+#define __NR_getxattr			1220
+#define __NR_lgetxattr			1221
+#define __NR_fgetxattr			1222
+#define __NR_listxattr			1223
+#define __NR_llistxattr			1224
+#define __NR_flistxattr			1225
+#define __NR_removexattr		1226
+#define __NR_lremovexattr		1227
+#define __NR_fremovexattr		1228
+#define __NR_tkill			1229
+#define __NR_futex			1230
+#define __NR_sched_setaffinity		1231
+#define __NR_sched_getaffinity		1232
+#define __NR_set_tid_address		1233
+#define __NR_fadvise64			1234
+#define __NR_tgkill			1235
+#define __NR_exit_group			1236
+#define __NR_lookup_dcookie		1237
+#define __NR_io_setup			1238
+#define __NR_io_destroy			1239
+#define __NR_io_getevents		1240
+#define __NR_io_submit			1241
+#define __NR_io_cancel			1242
+#define __NR_epoll_create		1243
+#define __NR_epoll_ctl			1244
+#define __NR_epoll_wait			1245
+#define __NR_restart_syscall		1246
+#define __NR_semtimedop			1247
+#define __NR_timer_create		1248
+#define __NR_timer_settime		1249
+#define __NR_timer_gettime		1250
+#define __NR_timer_getoverrun		1251
+#define __NR_timer_delete		1252
+#define __NR_clock_settime		1253
+#define __NR_clock_gettime		1254
+#define __NR_clock_getres		1255
+#define __NR_clock_nanosleep		1256
+#define __NR_fstatfs64			1257
+#define __NR_statfs64			1258
+#define __NR_mbind			1259
+#define __NR_get_mempolicy		1260
+#define __NR_set_mempolicy		1261
+#define __NR_mq_open			1262
+#define __NR_mq_unlink			1263
+#define __NR_mq_timedsend		1264
+#define __NR_mq_timedreceive		1265
+#define __NR_mq_notify			1266
+#define __NR_mq_getsetattr		1267
+#define __NR_kexec_load			1268
+#define __NR_vserver			1269
+#define __NR_waitid			1270
+#define __NR_add_key			1271
+#define __NR_request_key		1272
+#define __NR_keyctl			1273
+#define __NR_ioprio_set			1274
+#define __NR_ioprio_get			1275
+#define __NR_move_pages			1276
+#define __NR_inotify_init		1277
+#define __NR_inotify_add_watch		1278
+#define __NR_inotify_rm_watch		1279
+#define __NR_migrate_pages		1280
+#define __NR_openat			1281
+#define __NR_mkdirat			1282
+#define __NR_mknodat			1283
+#define __NR_fchownat			1284
+#define __NR_futimesat			1285
+#define __NR_newfstatat			1286
+#define __NR_unlinkat			1287
+#define __NR_renameat			1288
+#define __NR_linkat			1289
+#define __NR_symlinkat			1290
+#define __NR_readlinkat			1291
+#define __NR_fchmodat			1292
+#define __NR_faccessat			1293
+#define __NR_pselect6			1294
+#define __NR_ppoll			1295
+#define __NR_unshare			1296
+#define __NR_splice			1297
+#define __NR_set_robust_list		1298
+#define __NR_get_robust_list		1299
+#define __NR_sync_file_range		1300
+#define __NR_tee			1301
+#define __NR_vmsplice			1302
+#define __NR_fallocate			1303
+#define __NR_getcpu			1304
+#define __NR_epoll_pwait		1305
+#define __NR_utimensat			1306
+#define __NR_signalfd			1307
+#define __NR_timerfd			1308
+#define __NR_eventfd			1309
+#define __NR_timerfd_create		1310
+#define __NR_timerfd_settime		1311
+#define __NR_timerfd_gettime		1312
+#define __NR_signalfd4			1313
+#define __NR_eventfd2			1314
+#define __NR_epoll_create1		1315
+#define __NR_dup3			1316
+#define __NR_pipe2			1317
+#define __NR_inotify_init1		1318
+#define __NR_preadv			1319
+#define __NR_pwritev			1320
+#define __NR_rt_tgsigqueueinfo		1321
+#define __NR_recvmmsg			1322
+#define __NR_fanotify_init		1323
+#define __NR_fanotify_mark		1324
+#define __NR_prlimit64			1325
+#define __NR_name_to_handle_at		1326
+#define __NR_open_by_handle_at  	1327
+#define __NR_clock_adjtime		1328
+#define __NR_syncfs			1329
+#define __NR_setns			1330
+#define __NR_sendmmsg			1331
+#define __NR_process_vm_readv		1332
+#define __NR_process_vm_writev		1333
+#define __NR_accept4			1334
+#define __NR_finit_module		1335
+#define __NR_sched_setattr		1336
+#define __NR_sched_getattr		1337
+#define __NR_renameat2			1338
+#define __NR_getrandom			1339
+#define __NR_memfd_create		1340
+#define __NR_bpf			1341
+#define __NR_execveat			1342
+#define __NR_userfaultfd		1343
+#define __NR_membarrier			1344
+#define __NR_kcmp			1345
+#define __NR_mlock2			1346
+
+#endif /* _UAPI_ASM_IA64_UNISTD_H */
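
User space normally reaches these numbers through libc wrappers, but syscall(2) can invoke them directly; note that IA-64 numbering starts at 1024 rather than 0, e.g.:

	#include <unistd.h>
	#include <sys/syscall.h>

	static long raw_gettid(void)
	{
		return syscall(__NR_gettid);	/* 1105 on ia64 */
	}
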
diff --git a/arch/ia64/include/uapi/asm/ustack.h b/arch/ia64/include/uapi/asm/ustack.h
new file mode 100644
index 0000000..1dfebc6
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ustack.h
@@ -0,0 +1,12 @@
+#ifndef _UAPI_ASM_IA64_USTACK_H
+#define _UAPI_ASM_IA64_USTACK_H
+
+/*
+ * Constants for the user stack size
+ */
+
+
+/* Default user stack size: 2 GiB */
+#define DEFAULT_USER_STACK_SIZE	(1UL << 31)
+
+#endif /* _UAPI_ASM_IA64_USTACK_H */