File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 0000000..3359e10
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+	     remote_device.o port.o \
+	     host.o task.o probe_roms.o \
+	     remote_node_context.o \
+	     remote_node_table.o \
+	     unsolicited_frame_control.o \
+	     port_config.o
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 0000000..609dafd
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2807 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
+
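+/* The smu_max_* macros below decode capacity fields from the device
+ * context capacity (DCC) register; the hardware reports each capacity
+ * as a count minus one, hence the trailing "+ 1".
+ */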
+#define smu_max_ports(dcc_value) \
+	(\
+		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+	)
+
+#define smu_max_task_contexts(dcc_value)	\
+	(\
+		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+	)
+
+#define smu_max_rncs(dcc_value) \
+	(\
+		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+	)
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
+
+/**
+ * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index.
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+	(\
+		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
+	)
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+		 const struct sci_base_state *state_table, u32 initial_state)
+{
+	sci_state_transition_t handler;
+
+	sm->initial_state_id    = initial_state;
+	sm->previous_state_id   = initial_state;
+	sm->current_state_id    = initial_state;
+	sm->state_table         = state_table;
+
+	handler = sm->state_table[initial_state].enter_state;
+	if (handler)
+		handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+	sci_state_transition_t handler;
+
+	handler = sm->state_table[sm->current_state_id].exit_state;
+	if (handler)
+		handler(sm);
+
+	sm->previous_state_id = sm->current_state_id;
+	sm->current_state_id = next_state;
+
+	handler = sm->state_table[sm->current_state_id].enter_state;
+	if (handler)
+		handler(sm);
+}
+
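+/* A completion entry is valid while its cycle bit matches the cycle bit
+ * tracked in our completion_queue_get value; the cycle bit flips on each
+ * pass through the ring, distinguishing new entries from stale ones.
+ */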
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+	u32 get_value = ihost->completion_queue_get;
+	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+		return true;
+
+	return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+	if (sci_controller_completion_queue_has_entries(ihost))
+		return true;
+
+	/* We have a spurious interrupt; it could be that we have already
+	 * emptied the completion queue from a previous interrupt.
+	 * FIXME: really!?
+	 */
+	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+	/* There is a race in the hardware that could cause us not to be
+	 * notified of an interrupt completion if we do not take this
+	 * step.  We will mask then unmask the interrupts so if there is
+	 * another interrupt pending the clearing of the interrupt
+	 * source we get the next interrupt message.
+	 */
+	spin_lock(&ihost->scic_lock);
+	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
+		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+		writel(0, &ihost->smu_registers->interrupt_mask);
+	}
+	spin_unlock(&ihost->scic_lock);
+
+	return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+	struct isci_host *ihost = data;
+
+	if (sci_controller_isr(ihost))
+		tasklet_schedule(&ihost->completion_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+	u32 interrupt_status;
+
+	interrupt_status =
+		readl(&ihost->smu_registers->interrupt_status);
+	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+	if (interrupt_status != 0) {
+		/*
+		 * There is an error interrupt pending, so let it through and
+		 * handle it in the callback. */
+		return true;
+	}
+
+	/*
+	 * There is a race in the hardware that could cause us not to be notified
+	 * of an interrupt completion if we do not take this step.  We will mask
+	 * then unmask the error interrupts so if there was another interrupt
+	 * pending we will be notified.
+	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+	writel(0xff, &ihost->smu_registers->interrupt_mask);
+	writel(0, &ihost->smu_registers->interrupt_mask);
+
+	return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+	u32 index = SCU_GET_COMPLETION_INDEX(ent);
+	struct isci_request *ireq = ihost->reqs[index];
+
+	/* Make sure that we really want to process this IO request */
+	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+		/* Yep, this is a valid io request; pass it along to the
+		 * io request handler.
+		 */
+		sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+	u32 index;
+	struct isci_request *ireq;
+	struct isci_remote_device *idev;
+
+	index = SCU_GET_COMPLETION_INDEX(ent);
+
+	switch (scu_get_command_request_type(ent)) {
+	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+		ireq = ihost->reqs[index];
+		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+			 __func__, ent, ireq);
+		/* @todo For a post TC operation we need to fail the IO
+		 * request
+		 */
+		break;
+	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+		idev = ihost->device_table[index];
+		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+			 __func__, ent, idev);
+		/* @todo For a port RNC operation we need to fail the
+		 * device
+		 */
+		break;
+	default:
+		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+			 __func__, ent);
+		break;
+	}
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+	u32 index;
+	u32 frame_index;
+
+	struct scu_unsolicited_frame_header *frame_header;
+	struct isci_phy *iphy;
+	struct isci_remote_device *idev;
+
+	enum sci_status result = SCI_FAILURE;
+
+	frame_index = SCU_GET_FRAME_INDEX(ent);
+
+	frame_header = ihost->uf_control.buffers.array[frame_index].header;
+	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+	if (SCU_GET_FRAME_ERROR(ent)) {
+		/*
+		 * @todo If the IAF frame or SIGNATURE FIS frame has an error, will
+		 *       this cause a problem? We expect the phy initialization to
+		 *       fail if there is an error in the frame. */
+		sci_controller_release_frame(ihost, frame_index);
+		return;
+	}
+
+	if (frame_header->is_address_frame) {
+		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+		iphy = &ihost->phys[index];
+		result = sci_phy_frame_handler(iphy, frame_index);
+	} else {
+
+		index = SCU_GET_COMPLETION_INDEX(ent);
+
+		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+			/*
+			 * This is a signature fis or a frame from a direct attached SATA
+			 * device that has not yet been created.  In either case, forward
+			 * the frame to the PE and let it take care of the frame data. */
+			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+			iphy = &ihost->phys[index];
+			result = sci_phy_frame_handler(iphy, frame_index);
+		} else {
+			if (index < ihost->remote_node_entries)
+				idev = ihost->device_table[index];
+			else
+				idev = NULL;
+
+			if (idev != NULL)
+				result = sci_remote_device_frame_handler(idev, frame_index);
+			else
+				sci_controller_release_frame(ihost, frame_index);
+		}
+	}
+
+	if (result != SCI_SUCCESS) {
+		/*
+		 * @todo Is there any reason to report some additional error message
+		 *       when we get this failure notification? */
+	}
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+	struct isci_remote_device *idev;
+	struct isci_request *ireq;
+	struct isci_phy *iphy;
+	u32 index;
+
+	index = SCU_GET_COMPLETION_INDEX(ent);
+
+	switch (scu_get_event_type(ent)) {
+	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+		/* @todo The driver did something wrong and we need to fix the condition. */
+		dev_err(&ihost->pdev->dev,
+			"%s: SCIC Controller 0x%p received SMU command error "
+			"0x%x\n",
+			__func__,
+			ihost,
+			ent);
+		break;
+
+	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+	case SCU_EVENT_TYPE_SMU_ERROR:
+	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+		/*
+		 * @todo This is a hardware failure and it's likely that we want to
+		 *       reset the controller. */
+		dev_err(&ihost->pdev->dev,
+			"%s: SCIC Controller 0x%p received fatal controller "
+			"event  0x%x\n",
+			__func__,
+			ihost,
+			ent);
+		break;
+
+	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+		ireq = ihost->reqs[index];
+		sci_io_request_event_handler(ireq, ent);
+		break;
+
+	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+		switch (scu_get_event_specifier(ent)) {
+		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+			ireq = ihost->reqs[index];
+			if (ireq != NULL)
+				sci_io_request_event_handler(ireq, ent);
+			else
+				dev_warn(&ihost->pdev->dev,
+					 "%s: SCIC Controller 0x%p received "
+					 "event 0x%x for io request object "
+					 "that doesnt exist.\n",
+					 __func__,
+					 ihost,
+					 ent);
+
+			break;
+
+		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+			idev = ihost->device_table[index];
+			if (idev != NULL)
+				sci_remote_device_event_handler(idev, ent);
+			else
+				dev_warn(&ihost->pdev->dev,
+					 "%s: SCIC Controller 0x%p received "
+					 "event 0x%x for remote device object "
+					 "that doesnt exist.\n",
+					 __func__,
+					 ihost,
+					 ent);
+
+			break;
+		}
+		break;
+
+	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+	/*
+	 * direct the broadcast change event to the phy first and then let
+	 * the phy redirect the broadcast change to the port object */
+	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+	/*
+	 * direct error counter event to the phy object since that is where
+	 * we get the event notification.  This is a type 4 event. */
+	case SCU_EVENT_TYPE_OSSP_EVENT:
+		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+		iphy = &ihost->phys[index];
+		sci_phy_event_handler(iphy, ent);
+		break;
+
+	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+	case SCU_EVENT_TYPE_RNC_OPS_MISC:
+		if (index < ihost->remote_node_entries) {
+			idev = ihost->device_table[index];
+
+			if (idev != NULL)
+				sci_remote_device_event_handler(idev, ent);
+		} else
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC Controller 0x%p received event 0x%x "
+				"for remote device object 0x%0x that doesnt "
+				"exist.\n",
+				__func__,
+				ihost,
+				ent,
+				index);
+
+		break;
+
+	default:
+		dev_warn(&ihost->pdev->dev,
+			 "%s: SCIC Controller received unknown event code %x\n",
+			 __func__,
+			 ent);
+		break;
+	}
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+	u32 completion_count = 0;
+	u32 ent;
+	u32 get_index;
+	u32 get_cycle;
+	u32 event_get;
+	u32 event_cycle;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: completion queue beginning get:0x%08x\n",
+		__func__,
+		ihost->completion_queue_get);
+
+	/* Get the component parts of the completion queue */
+	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+	while (
+		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+		) {
+		completion_count++;
+
+		ent = ihost->completion_queue[get_index];
+
+		/* increment the get pointer and check for rollover to toggle the cycle bit */
+		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
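+		/* ((get_index + 1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) is
+		 * non-zero only at the wrap point, so the XOR above flips
+		 * SMU_CQGR_CYCLE_BIT exactly once per pass through the ring.
+		 */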
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: completion queue entry:0x%08x\n",
+			__func__,
+			ent);
+
+		switch (SCU_GET_COMPLETION_TYPE(ent)) {
+		case SCU_COMPLETION_TYPE_TASK:
+			sci_controller_task_completion(ihost, ent);
+			break;
+
+		case SCU_COMPLETION_TYPE_SDMA:
+			sci_controller_sdma_completion(ihost, ent);
+			break;
+
+		case SCU_COMPLETION_TYPE_UFI:
+			sci_controller_unsolicited_frame(ihost, ent);
+			break;
+
+		case SCU_COMPLETION_TYPE_EVENT:
+			sci_controller_event_completion(ihost, ent);
+			break;
+
+		case SCU_COMPLETION_TYPE_NOTIFY: {
+			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+			sci_controller_event_completion(ihost, ent);
+			break;
+		}
+		default:
+			dev_warn(&ihost->pdev->dev,
+				 "%s: SCIC Controller received unknown "
+				 "completion type %x\n",
+				 __func__,
+				 ent);
+			break;
+		}
+	}
+
+	/* Update the get register if we completed one or more entries */
+	if (completion_count > 0) {
+		ihost->completion_queue_get =
+			SMU_CQGR_GEN_BIT(ENABLE) |
+			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+			event_cycle |
+			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+			get_cycle |
+			SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+		writel(ihost->completion_queue_get,
+		       &ihost->smu_registers->completion_queue_get);
+
+	}
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: completion queue ending get:0x%08x\n",
+		__func__,
+		ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+	u32 interrupt_status;
+
+	interrupt_status =
+		readl(&ihost->smu_registers->interrupt_status);
+
+	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+	    sci_controller_completion_queue_has_entries(ihost)) {
+
+		sci_controller_process_completions(ihost);
+		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+	} else {
+		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+			interrupt_status);
+
+		sci_change_state(&ihost->sm, SCIC_FAILED);
+
+		return;
+	}
+
+	/* If we don't process any completions, I am not sure that we want to do this.
+	 * We are in the middle of a hardware fault and should probably be reset.
+	 */
+	writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct isci_host *ihost = data;
+
+	if (sci_controller_isr(ihost)) {
+		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+		tasklet_schedule(&ihost->completion_tasklet);
+		ret = IRQ_HANDLED;
+	} else if (sci_controller_error_isr(ihost)) {
+		spin_lock(&ihost->scic_lock);
+		sci_controller_error_handler(ihost);
+		spin_unlock(&ihost->scic_lock);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+	struct isci_host *ihost = data;
+
+	if (sci_controller_error_isr(ihost))
+		sci_controller_error_handler(ihost);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ *    through the ISCI Module, to indicate controller start status.
+ * @ihost: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ *    core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+	if (completion_status != SCI_SUCCESS)
+		dev_info(&ihost->pdev->dev,
+			"controller start timed out, continuing...\n");
+	clear_bit(IHOST_START_PENDING, &ihost->flags);
+	wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct isci_host *ihost = ha->lldd_ha;
+
+	if (test_bit(IHOST_START_PENDING, &ihost->flags))
+		return 0;
+
+	sas_drain_work(ha);
+
+	return 1;
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ *    suggested sci_controller_start() timeout amount.  The user is free to
+ *    use any timeout value, but this method provides the suggested minimum
+ *    start timeout value.  The returned value is based upon empirical
+ *    information determined as a result of interoperability testing.
+ * @ihost: the handle to the controller object for which to return the
+ *    suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+	/* Validate the user supplied parameters. */
+	if (!ihost)
+		return 0;
+
+	/*
+	 * The suggested minimum timeout value for a controller start operation:
+	 *
+	 *     Signature FIS Timeout
+	 *   + Phy Start Timeout
+	 *   + Number of Phy Spin Up Intervals
+	 *   ---------------------------------
+	 *   Number of milliseconds for the controller start operation.
+	 *
+	 * NOTE: The number of phy spin up intervals will be equivalent
+	 *       to the number of phys divided by the number of phys allowed
+	 *       per interval - 1 (once OEM parameters are supported).
+	 *       Currently we assume only 1 phy per interval. */
+
+	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+	writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+	readl(&ihost->smu_registers->interrupt_mask); /* flush */
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+	u32 port_task_scheduler_value;
+
+	port_task_scheduler_value =
+		readl(&ihost->scu_registers->peg0.ptsg.control);
+	port_task_scheduler_value |=
+		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+	writel(port_task_scheduler_value,
+	       &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+	u32 task_assignment;
+
+	/*
+	 * Assign all the TCs to function 0
+	 * TODO: Do we actually need to read this register to write it back?
+	 */
+
+	task_assignment =
+		readl(&ihost->smu_registers->task_context_assignment[0]);
+
+	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+		(SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
+		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+	writel(task_assignment,
+		&ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+	u32 index;
+	u32 completion_queue_control_value;
+	u32 completion_queue_get_value;
+	u32 completion_queue_put_value;
+
+	ihost->completion_queue_get = 0;
+
+	completion_queue_control_value =
+		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+	writel(completion_queue_control_value,
+	       &ihost->smu_registers->completion_queue_control);
+
+
+	/* Set the completion queue get pointer and enable the queue */
+	completion_queue_get_value = (
+		(SMU_CQGR_GEN_VAL(POINTER, 0))
+		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+		| (SMU_CQGR_GEN_BIT(ENABLE))
+		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+		);
+
+	writel(completion_queue_get_value,
+	       &ihost->smu_registers->completion_queue_get);
+
+	/* Set the completion queue put pointer */
+	completion_queue_put_value = (
+		(SMU_CQPR_GEN_VAL(POINTER, 0))
+		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+		);
+
+	writel(completion_queue_put_value,
+	       &ihost->smu_registers->completion_queue_put);
+
+	/* Initialize the cycle bit of the completion queue entries */
+	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+		/*
+		 * If get.cycle_bit != completion_queue.cycle_bit
+		 * it's not a valid completion queue entry,
+		 * so at system start all entries are invalid. */
+		ihost->completion_queue[index] = 0x80000000;
+	}
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+	u32 frame_queue_control_value;
+	u32 frame_queue_get_value;
+	u32 frame_queue_put_value;
+
+	/* Write the queue size */
+	frame_queue_control_value =
+		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+	writel(frame_queue_control_value,
+	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+	/* Setup the get pointer for the unsolicited frame queue */
+	frame_queue_get_value = (
+		SCU_UFQGP_GEN_VAL(POINTER, 0)
+		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+		);
+
+	writel(frame_queue_get_value,
+	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+	/* Setup the put pointer for the unsolicited frame queue */
+	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+	writel(frame_queue_put_value,
+	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+	if (ihost->sm.current_state_id == SCIC_STARTING) {
+		/*
+		 * We move into the ready state, because some of the phys/ports
+		 * may be up and operational.
+		 */
+		sci_change_state(&ihost->sm, SCIC_READY);
+
+		isci_host_start_complete(ihost, status);
+	}
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+	enum sci_phy_states state;
+
+	state = iphy->sm.current_state_id;
+	switch (state) {
+	case SCI_PHY_STARTING:
+	case SCI_PHY_SUB_INITIAL:
+	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_IAF_UF:
+	case SCI_PHY_SUB_AWAIT_SAS_POWER:
+	case SCI_PHY_SUB_AWAIT_SATA_POWER:
+	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_OSSP_EN:
+	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+	case SCI_PHY_SUB_FINAL:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct isci_phy *iphy = &ihost->phys[i];
+		u32 state = iphy->sm.current_state_id;
+
+		/* in apc mode we need to check every phy, in
+		 * mpc mode we only need to check phys that have
+		 * been configured into a port
+		 */
+		if (is_port_config_apc(ihost))
+			/* pass */;
+		else if (!phy_get_non_dummy_port(iphy))
+			continue;
+
+		/* The controller start operation is complete iff:
+		 * - all links have been given an opportunity to start, and
+		 * - each link either has no indication of a connected device,
+		 *   or has an indication of a connected device and has
+		 *   finished the link training process.
+		 */
+		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @ihost: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+	struct sci_oem_params *oem = &ihost->oem_parameters;
+	struct isci_phy *iphy;
+	enum sci_status status;
+
+	status = SCI_SUCCESS;
+
+	if (ihost->phy_startup_timer_pending)
+		return status;
+
+	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+		if (is_controller_start_complete(ihost)) {
+			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+			sci_del_timer(&ihost->phy_timer);
+			ihost->phy_startup_timer_pending = false;
+		}
+	} else {
+		iphy = &ihost->phys[ihost->next_phy_to_start];
+
+		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+			if (phy_get_non_dummy_port(iphy) == NULL) {
+				ihost->next_phy_to_start++;
+
+				/* Caution: recursion ahead, be forewarned.
+				 *
+				 * The PHY was never added to a PORT in MPC
+				 * mode, so start the next phy in sequence.
+				 * This phy will never go link up and will not
+				 * draw power; the OEM parameters either
+				 * configured the phy incorrectly for the PORT
+				 * or it was never assigned to a PORT.
+				 */
+				return sci_controller_start_next_phy(ihost);
+			}
+		}
+
+		status = sci_phy_start(iphy);
+
+		if (status == SCI_SUCCESS) {
+			sci_mod_timer(&ihost->phy_timer,
+				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+			ihost->phy_startup_timer_pending = true;
+		} else {
+			dev_warn(&ihost->pdev->dev,
+				 "%s: Controller stop operation failed "
+				 "to stop phy %d because of status "
+				 "%d.\n",
+				 __func__,
+				 ihost->phys[ihost->next_phy_to_start].phy_index,
+				 status);
+		}
+
+		ihost->next_phy_to_start++;
+	}
+
+	return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+	unsigned long flags;
+	enum sci_status status;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	ihost->phy_startup_timer_pending = false;
+
+	do {
+		status = sci_controller_start_next_phy(ihost);
+	} while (status != SCI_SUCCESS);
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
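+/* Count of task context indices currently allocated from the tci free
+ * ring, i.e. handed out by alloc and not yet returned by free.
+ */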
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+					     u32 timeout)
+{
+	enum sci_status result;
+	u16 index;
+
+	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	/* Build the TCi free pool */
+	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+	ihost->tci_head = 0;
+	ihost->tci_tail = 0;
+	for (index = 0; index < ihost->task_context_entries; index++)
+		isci_tci_free(ihost, index);
+
+	/* Build the RNi free pool */
+	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+					 ihost->remote_node_entries);
+
+	/*
+	 * Before anything else lets make sure we will not be
+	 * interrupted by the hardware.
+	 */
+	sci_controller_disable_interrupts(ihost);
+
+	/* Enable the port task scheduler */
+	sci_controller_enable_port_task_scheduler(ihost);
+
+	/* Assign all the task entries to ihost physical function */
+	sci_controller_assign_task_entries(ihost);
+
+	/* Now initialize the completion queue */
+	sci_controller_initialize_completion_queue(ihost);
+
+	/* Initialize the unsolicited frame queue for use */
+	sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+	/* Start all of the ports on this controller */
+	for (index = 0; index < ihost->logical_port_entries; index++) {
+		struct isci_port *iport = &ihost->ports[index];
+
+		result = sci_port_start(iport);
+		if (result)
+			return result;
+	}
+
+	sci_controller_start_next_phy(ihost);
+
+	sci_mod_timer(&ihost->timer, timeout);
+
+	sci_change_state(&ihost->sm, SCIC_STARTING);
+
+	return SCI_SUCCESS;
+}
+
+void isci_host_start(struct Scsi_Host *shost)
+{
+	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+	set_bit(IHOST_START_PENDING, &ihost->flags);
+
+	spin_lock_irq(&ihost->scic_lock);
+	sci_controller_start(ihost, tmo);
+	sci_controller_enable_interrupts(ihost);
+	spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost)
+{
+	sci_controller_disable_interrupts(ihost);
+	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+	wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+	/* Empty out the completion queue */
+	if (sci_controller_completion_queue_has_entries(ihost))
+		sci_controller_process_completions(ihost);
+
+	/* Clear the interrupt and enable all interrupts again */
+	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+	/* Could we write the value of SMU_ISR_COMPLETION? */
+	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+	writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
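+/* Complete an I/O back to libsas: a request that completed in the target
+ * is reported via task->task_done(); an errored one is routed through
+ * sas_task_abort().  Abort-path waiters are woken, and the tag is freed
+ * unless IREQ_NO_AUTO_FREE_TAG defers that to the caller.
+ */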
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+			/* Normal notification (task_done) */
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Normal - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+			task->lldd_task = NULL;
+			task->task_done(task);
+		} else {
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Error - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+			if (sas_protocol_ata(task->task_proto))
+				task->lldd_task = NULL;
+			sas_task_abort(task);
+		}
+	} else
+		task->lldd_task = NULL;
+
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
+
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
+}
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ *    routine that calls the sci core library's completion handler. It's
+ *    scheduled as a tasklet from the interrupt service routine when interrupts
+ *    are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+void isci_host_completion_routine(unsigned long data)
+{
+	struct isci_host *ihost = (struct isci_host *)data;
+	u16 active;
+
+	spin_lock_irq(&ihost->scic_lock);
+	sci_controller_completion_handler(ihost);
+	spin_unlock_irq(&ihost->scic_lock);
+
+	/*
+	 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
+	 * issued for the hardware issue workaround
+	 */
+	active = isci_tci_active(ihost) - SCI_MAX_PORTS;
+
+	/*
+	 * the coalescence timeout doubles at each encoding step, so
+	 * update it based on the ilog2 value of the outstanding requests
+	 */
+	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+	       &ihost->smu_registers->interrupt_coalesce_control);
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ *    object.  This method will invoke the associated user callback upon
+ *    completion.  The completion callback is called when the following
+ *    conditions are met: the method return status is SCI_SUCCESS, and the
+ *    controller has been quiesced.  This method will ensure that all IO
+ *    requests are quiesced, phys are stopped, and all additional operation by
+ *    the hardware is halted.
+ * @ihost: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * The controller must be in the STARTED or STOPPED state. Indicate if the
+ * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
+ * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
+ * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
+ * controller is not either in the STARTED or STOPPED states.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_mod_timer(&ihost->timer, timeout);
+	sci_change_state(&ihost->sm, SCIC_STOPPING);
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ *    controller regardless of the state of said controller.  This operation is
+ *    considered destructive.  In other words, all current operations are wiped
+ *    out.  No IO completions for outstanding devices occur.  Outstanding IO
+ *    requests are not aborted or completed at the actual remote device.
+ * @ihost: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
+ * the controller reset operation is unable to complete.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+	switch (ihost->sm.current_state_id) {
+	case SCIC_RESET:
+	case SCIC_READY:
+	case SCIC_STOPPING:
+	case SCIC_FAILED:
+		/*
+		 * The reset operation is not a graceful cleanup, just
+		 * perform the state transition.
+		 */
+		sci_change_state(&ihost->sm, SCIC_RESETTING);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+	u32 index;
+	enum sci_status status;
+	enum sci_status phy_status;
+
+	status = SCI_SUCCESS;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		phy_status = sci_phy_stop(&ihost->phys[index]);
+
+		if (phy_status != SCI_SUCCESS &&
+		    phy_status != SCI_FAILURE_INVALID_STATE) {
+			status = SCI_FAILURE;
+
+			dev_warn(&ihost->pdev->dev,
+				 "%s: Controller stop operation failed to stop "
+				 "phy %d because of status %d.\n",
+				 __func__,
+				 ihost->phys[index].phy_index, phy_status);
+		}
+	}
+
+	return status;
+}
+
+
+/**
+ * isci_host_deinit - shutdown frame reception and dma
+ * @ihost: host to take down
+ *
+ * This is called in either the driver shutdown or the suspend path.  In
+ * the shutdown case libsas went through port teardown and normal device
+ * removal (i.e. physical links stayed up to service scsi_device removal
+ * commands).  In the suspend case we disable the hardware without
+ * notifying libsas of the link down events since we want libsas to
+ * remember the domain across the suspend/resume cycle
+ */
+void isci_host_deinit(struct isci_host *ihost)
+{
+	int i;
+
+	/* disable output data selects */
+	for (i = 0; i < isci_gpio_count(ihost); i++)
+		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+
+	set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+	spin_lock_irq(&ihost->scic_lock);
+	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+	spin_unlock_irq(&ihost->scic_lock);
+
+	wait_for_stop(ihost);
+
+	/* phy stop is after controller stop to allow port and device to
+	 * go idle before shutting down the phys, but the expectation is
+	 * that i/o has been shut off well before we reach this
+	 * function.
+	 */
+	sci_controller_stop_phys(ihost);
+
+	/* disable sgpio: where the above wait should give time for the
+	 * enclosure to sample the gpios going inactive
+	 */
+	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+
+	spin_lock_irq(&ihost->scic_lock);
+	sci_controller_reset(ihost);
+	spin_unlock_irq(&ihost->scic_lock);
+
+	/* Cancel any/all outstanding port timers */
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+		del_timer_sync(&iport->timer.timer);
+	}
+
+	/* Cancel any/all outstanding phy timers */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct isci_phy *iphy = &ihost->phys[i];
+		del_timer_sync(&iphy->sata_timer.timer);
+	}
+
+	del_timer_sync(&ihost->port_agent.timer.timer);
+
+	del_timer_sync(&ihost->power_control.timer.timer);
+
+	del_timer_sync(&ihost->timer.timer);
+
+	del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+	struct pci_dev *pdev = isci_host->pdev;
+	int id = isci_host->id;
+
+	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+	struct pci_dev *pdev = isci_host->pdev;
+	int id = isci_host->id;
+
+	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX                        256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ *    configure the interrupt coalescence.
+ * @ihost: This parameter represents the handle to the controller object
+ *    for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ *    Queue before an interrupt is generated. If the number of entries exceed
+ *    this number, an interrupt will be generated. The valid range of the input
+ *    is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ *    input is [0, 2700000].  A setting of 0 is allowed and results in no
+ *    interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+					 u32 coalesce_number,
+					 u32 coalesce_timeout)
+{
+	u8 timeout_encode = 0;
+	u32 min = 0;
+	u32 max = 0;
+
+	/* Check if the input parameters fall in the range. */
+	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+	/*
+	 *  Defined encoding for interrupt coalescing timeout:
+	 *              Value   Min      Max     Units
+	 *              -----   ---      ---     -----
+	 *              0       -        -       Disabled
+	 *              1       13.3     20.0    ns
+	 *              2       26.7     40.0
+	 *              3       53.3     80.0
+	 *              4       106.7    160.0
+	 *              5       213.3    320.0
+	 *              6       426.7    640.0
+	 *              7       853.3    1280.0
+	 *              8       1.7      2.6     us
+	 *              9       3.4      5.1
+	 *              10      6.8      10.2
+	 *              11      13.7     20.5
+	 *              12      27.3     41.0
+	 *              13      54.6     81.9
+	 *              14      109.2    163.8
+	 *              15      218.5    327.7
+	 *              16      436.9    655.4
+	 *              17      873.8    1310.7
+	 *              18      1.7      2.6     ms
+	 *              19      3.5      5.2
+	 *              20      7.0      10.5
+	 *              21      14.0     21.0
+	 *              22      28.0     41.9
+	 *              23      55.9     83.9
+	 *              24      111.8    167.8
+	 *              25      223.7    335.5
+	 *              26      447.4    671.1
+	 *              27      894.8    1342.2
+	 *              28      1.8      2.7     s
+	 *              Others Undefined */
+
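+	/*
+	 * Worked example: a coalesce_timeout of 5 us becomes 500 in 10 ns
+	 * units; the search below doubles [min, max) from [85, 128) at
+	 * encode 7 up to [340, 512) at encode 9, where 500 first falls in
+	 * range, so 9 is the value written to the TIMER field.
+	 */
+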
+	/*
+	 * Use the table above to decide the encoding of the interrupt coalescing
+	 * timeout value for register writing. */
+	if (coalesce_timeout == 0)
+		timeout_encode = 0;
+	else {
+		/* convert the timeout value to units of 10 ns. */
+		coalesce_timeout = coalesce_timeout * 100;
+		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+		/* get the encoding of the timeout for register writing. */
+		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+		      timeout_encode++) {
+			if (min <= coalesce_timeout &&  max > coalesce_timeout)
+				break;
+			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+					break;
+				else {
+					timeout_encode++;
+					break;
+				}
+			} else {
+				max = max * 2;
+				min = min * 2;
+			}
+		}
+
+		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+			/* the value is out of range. */
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	}
+
+	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+	       &ihost->smu_registers->interrupt_coalesce_control);
+
+
+	ihost->interrupt_coalesce_number = (u16)coalesce_number;
+	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+	return SCI_SUCCESS;
+}
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+	u32 val;
+
+	/* enable clock gating for power control of the scu unit */
+	val = readl(&ihost->smu_registers->clock_gating_control);
+	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
+	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
+	writel(val, &ihost->smu_registers->clock_gating_control);
+
+	/* set the default interrupt coalescence number and timeout value. */
+	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	/* disable interrupt coalescence. */
+	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+	u32 index;
+	enum sci_status port_status;
+	enum sci_status status = SCI_SUCCESS;
+
+	for (index = 0; index < ihost->logical_port_entries; index++) {
+		struct isci_port *iport = &ihost->ports[index];
+
+		port_status = sci_port_stop(iport);
+
+		if ((port_status != SCI_SUCCESS) &&
+		    (port_status != SCI_FAILURE_INVALID_STATE)) {
+			status = SCI_FAILURE;
+
+			dev_warn(&ihost->pdev->dev,
+				 "%s: Controller stop operation failed to "
+				 "stop port %d because of status %d.\n",
+				 __func__,
+				 iport->logical_port_index,
+				 port_status);
+		}
+	}
+
+	return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+	u32 index;
+	enum sci_status status;
+	enum sci_status device_status;
+
+	status = SCI_SUCCESS;
+
+	for (index = 0; index < ihost->remote_node_entries; index++) {
+		if (ihost->device_table[index] != NULL) {
+			/* @todo What timeout value do we want to provide to this request? */
+			device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+			if ((device_status != SCI_SUCCESS) &&
+			    (device_status != SCI_FAILURE_INVALID_STATE)) {
+				dev_warn(&ihost->pdev->dev,
+					 "%s: Controller stop operation failed "
+					 "to stop device 0x%p because of "
+					 "status %d.\n",
+					 __func__,
+					 ihost->device_table[index], device_status);
+			}
+		}
+	}
+
+	return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	sci_controller_stop_devices(ihost);
+	sci_controller_stop_ports(ihost);
+
+	if (!sci_controller_has_remote_devices_stopping(ihost))
+		isci_host_stop_complete(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+	/* Disable interrupts so we don't take any spurious interrupts */
+	sci_controller_disable_interrupts(ihost);
+
+	/* Reset the SCU */
+	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+	/* Delay for 1ms before clearing the CQP and UFQPR. */
+	udelay(1000);
+
+	/* The write to the CQGR clears the CQP */
+	writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+	/* The write to the UFQGP clears the UFQPR */
+	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+	/* clear all interrupts */
+	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+	sci_controller_reset_hardware(ihost);
+	sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+	[SCIC_INITIAL] = {
+		.enter_state = sci_controller_initial_state_enter,
+	},
+	[SCIC_RESET] = {},
+	[SCIC_INITIALIZING] = {},
+	[SCIC_INITIALIZED] = {},
+	[SCIC_STARTING] = {
+		.exit_state  = sci_controller_starting_state_exit,
+	},
+	[SCIC_READY] = {
+		.enter_state = sci_controller_ready_state_enter,
+		.exit_state  = sci_controller_ready_state_exit,
+	},
+	[SCIC_RESETTING] = {
+		.enter_state = sci_controller_resetting_state_enter,
+	},
+	[SCIC_STOPPING] = {
+		.enter_state = sci_controller_stopping_state_enter,
+		.exit_state = sci_controller_stopping_state_exit,
+	},
+	[SCIC_FAILED] = {}
+};
+
+static void controller_timeout(unsigned long data)
+{
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+	struct sci_base_state_machine *sm = &ihost->sm;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	if (sm->current_state_id == SCIC_STARTING)
+		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+	else if (sm->current_state_id == SCIC_STOPPING) {
+		sci_change_state(sm, SCIC_FAILED);
+		isci_host_stop_complete(ihost);
+	} else	/* @todo Now what do we want to do in this case? */
+		dev_err(&ihost->pdev->dev,
+			"%s: Controller timer fired when controller was not "
+			"in a state being timed.\n",
+			__func__);
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+						void __iomem *scu_base,
+						void __iomem *smu_base)
+{
+	u8 i;
+
+	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+	ihost->scu_registers = scu_base;
+	ihost->smu_registers = smu_base;
+
+	sci_port_configuration_agent_construct(&ihost->port_agent);
+
+	/* Construct the ports for this controller */
+	for (i = 0; i < SCI_MAX_PORTS; i++)
+		sci_port_construct(&ihost->ports[i], i, ihost);
+	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+	/* Construct the phys for this controller */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		/* Add all the PHYs to the dummy port */
+		sci_phy_construct(&ihost->phys[i],
+				  &ihost->ports[SCI_MAX_PORTS], i);
+	}
+
+	ihost->invalid_phy_mask = 0;
+
+	sci_init_timer(&ihost->timer, controller_timeout);
+
+	return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PORTS; i++)
+		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+			return -EINVAL;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++)
+		if (oem->phys[i].sas_address.high == 0 &&
+		    oem->phys[i].sas_address.low == 0)
+			return -EINVAL;
+
+	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+		for (i = 0; i < SCI_MAX_PHYS; i++)
+			if (oem->ports[i].phy_mask != 0)
+				return -EINVAL;
+	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+		u8 phy_mask = 0;
+
+		for (i = 0; i < SCI_MAX_PHYS; i++)
+			phy_mask |= oem->ports[i].phy_mask;
+
+		if (phy_mask == 0)
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+	    oem->controller.max_concurr_spin_up < 1)
+		return -EINVAL;
+
+	if (oem->controller.do_enable_ssc) {
+		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+			return -EINVAL;
+
+		if (version >= ISCI_ROM_VER_1_1) {
+			u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+			switch (test) {
+			case 0:
+			case 2:
+			case 3:
+			case 6:
+			case 7:
+				break;
+			default:
+				return -EINVAL;
+			}
+
+			test = oem->controller.ssc_sas_tx_spread_level;
+			if (oem->controller.ssc_sas_tx_type == 0) {
+				switch (test) {
+				case 0:
+				case 2:
+				case 3:
+					break;
+				default:
+					return -EINVAL;
+				}
+			} else if (oem->controller.ssc_sas_tx_type == 1) {
+				switch (test) {
+				case 0:
+				case 3:
+				case 6:
+					break;
+				default:
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static u8 max_spin_up(struct isci_host *ihost)
+{
+	if (ihost->user_parameters.max_concurr_spinup)
+		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+	else
+		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
+static void power_control_timeout(unsigned long data)
+{
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+	struct isci_phy *iphy;
+	unsigned long flags;
+	u8 i;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	ihost->power_control.phys_granted_power = 0;
+
+	if (ihost->power_control.phys_waiting == 0) {
+		ihost->power_control.timer_started = false;
+		goto done;
+	}
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+		if (ihost->power_control.phys_waiting == 0)
+			break;
+
+		iphy = ihost->power_control.requesters[i];
+		if (iphy == NULL)
+			continue;
+
+		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
+			break;
+
+		ihost->power_control.requesters[i] = NULL;
+		ihost->power_control.phys_waiting--;
+		ihost->power_control.phys_granted_power++;
+		sci_phy_consume_power_handler(iphy);
+
+		if (iphy->protocol == SAS_PROTOCOL_SSP) {
+			u8 j;
+
+			for (j = 0; j < SCI_MAX_PHYS; j++) {
+				struct isci_phy *requester = ihost->power_control.requesters[j];
+
+				/*
+				 * Search the power_control queue to see if there are other phys
+				 * attached to the same remote device. If found, take all of
+				 * them out of await_sas_power state.
+				 */
+				if (requester != NULL && requester != iphy) {
+					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+							  iphy->frame_rcvd.iaf.sas_addr,
+							  sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+					if (other == 0) {
+						ihost->power_control.requesters[j] = NULL;
+						ihost->power_control.phys_waiting--;
+						sci_phy_consume_power_handler(requester);
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * It doesn't matter if the power list is empty, we need to start the
+	 * timer in case another phy becomes ready.
+	 */
+	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+	ihost->power_control.timer_started = true;
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+					       struct isci_phy *iphy)
+{
+	BUG_ON(iphy == NULL);
+
+	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
+		ihost->power_control.phys_granted_power++;
+		sci_phy_consume_power_handler(iphy);
+
+		/*
+		 * stop and start the power_control timer. When the timer fires,
+		 * phys_granted_power will be reset to 0.
+		 */
+		if (ihost->power_control.timer_started)
+			sci_del_timer(&ihost->power_control.timer);
+
+		sci_mod_timer(&ihost->power_control.timer,
+				 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+		ihost->power_control.timer_started = true;
+
+	} else {
+		/*
+		 * If another phy attached to the same SAS address as this phy
+		 * is already in the READY state, this phy does not need to wait.
+		 */
+		u8 i;
+		struct isci_phy *current_phy;
+
+		for (i = 0; i < SCI_MAX_PHYS; i++) {
+			u8 other;
+			current_phy = &ihost->phys[i];
+
+			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+				       iphy->frame_rcvd.iaf.sas_addr,
+				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+			    current_phy->protocol == SAS_PROTOCOL_SSP &&
+			    other == 0) {
+				sci_phy_consume_power_handler(iphy);
+				break;
+			}
+		}
+
+		if (i == SCI_MAX_PHYS) {
+			/* Add the phy in the waiting list */
+			ihost->power_control.requesters[iphy->phy_index] = iphy;
+			ihost->power_control.phys_waiting++;
+		}
+	}
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+					       struct isci_phy *iphy)
+{
+	BUG_ON(iphy == NULL);
+
+	if (ihost->power_control.requesters[iphy->phy_index])
+		ihost->power_control.phys_waiting--;
+
+	ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+	return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+	return !!(selection_byte & (1 << (phy + 4)));
+}
+
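+/*
+ * Each phy owns two selection bits: bit 'phy' marks a long cable and bit
+ * 'phy + 4' marks a medium cable.  Decoding both yields short_cable (0),
+ * long_cable (1), medium_cable (2), or undefined_cable (3) when both are set.
+ */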
+static enum cable_selections decode_selection_byte(
+	int phy,
+	unsigned char selection_byte)
+{
+	return ((selection_byte & (1 << phy)) ? 1 : 0)
+		+ (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+	if (is_cable_select_overridden())
+		return ((unsigned char *)&cable_selection_override)
+			+ ihost->id;
+	else
+		return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+	return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+	static char *cable_names[] = {
+		[short_cable]     = "short",
+		[long_cable]      = "long",
+		[medium_cable]    = "medium",
+		[undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+	};
+	return (selection <= undefined_cable) ? cable_names[selection]
+					      : cable_names[undefined_cable];
+}
+
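+/* microseconds to stall between successive AFE register writes */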
+#define AFE_REGISTER_WRITE_DELAY 10
+
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+	struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+	const struct sci_oem_params *oem = &ihost->oem_parameters;
+	struct pci_dev *pdev = ihost->pdev;
+	u32 afe_status;
+	u32 phy_id;
+	unsigned char cable_selection_mask = *to_cable_select(ihost);
+
+	/* Clear DFX Status registers */
+	writel(0x0081000f, &afe->afe_dfx_master_control0);
+	udelay(AFE_REGISTER_WRITE_DELAY);
+
+	if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
+		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+		 * Timer, PM Stagger Timer
+		 */
+		writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+	}
+
+	/* Configure bias currents to normal */
+	if (is_a2(pdev))
+		writel(0x00005A00, &afe->afe_bias_control);
+	else if (is_b0(pdev) || is_c0(pdev))
+		writel(0x00005F00, &afe->afe_bias_control);
+	else if (is_c1(pdev))
+		writel(0x00005500, &afe->afe_bias_control);
+
+	udelay(AFE_REGISTER_WRITE_DELAY);
+
+	/* Enable PLL */
+	if (is_a2(pdev))
+		writel(0x80040908, &afe->afe_pll_control0);
+	else if (is_b0(pdev) || is_c0(pdev))
+		writel(0x80040A08, &afe->afe_pll_control0);
+	else if (is_c1(pdev)) {
+		writel(0x80000B08, &afe->afe_pll_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+		writel(0x00000B08, &afe->afe_pll_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+		writel(0x80000B08, &afe->afe_pll_control0);
+	}
+
+	udelay(AFE_REGISTER_WRITE_DELAY);
+
+	/* Wait for the PLL to lock */
+	do {
+		afe_status = readl(&afe->afe_common_block_status);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+	} while ((afe_status & 0x00001000) == 0);
+
+	if (is_a2(pdev)) {
+		/* Shorten SAS SNW lock time (RxLock timer value from 76
+		 * us to 50 us)
+		 */
+		writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+	}
+
+	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+		struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
+		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+		int cable_length_long =
+			is_long_cable(phy_id, cable_selection_mask);
+		int cable_length_medium =
+			is_medium_cable(phy_id, cable_selection_mask);
+
+		if (is_a2(pdev)) {
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x00004512, &xcvr->afe_xcvr_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x0050100F, &xcvr->afe_xcvr_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		} else if (is_b0(pdev)) {
+			/* Configure transmitter SSC parameters */
+			writel(0x00030000, &xcvr->afe_tx_ssc_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		} else if (is_c0(pdev)) {
+			/* Configure transmitter SSC parameters */
+			writel(0x00010202, &xcvr->afe_tx_ssc_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x00014500, &xcvr->afe_xcvr_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		} else if (is_c1(pdev)) {
+			/* Configure transmitter SSC parameters */
+			writel(0x00010202, &xcvr->afe_tx_ssc_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			/* All defaults, except the Receive Word
+			 * Alignment/Comma Detect Enable....(0xe800)
+			 */
+			writel(0x0001C500, &xcvr->afe_xcvr_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		}
+
+		/* Power up TX and RX out from power down (PWRDNTX and
+		 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+		 */
+		if (is_a2(pdev))
+			writel(0x000003F0, &xcvr->afe_channel_control);
+		else if (is_b0(pdev)) {
+			writel(0x000003D7, &xcvr->afe_channel_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x000003D4, &xcvr->afe_channel_control);
+		} else if (is_c0(pdev)) {
+			writel(0x000001E7, &xcvr->afe_channel_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x000001E4, &xcvr->afe_channel_control);
+		} else if (is_c1(pdev)) {
+			writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+			       &xcvr->afe_channel_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+			       &xcvr->afe_channel_control);
+		}
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		if (is_a2(pdev)) {
+			/* Enable TX equalization (0xe824) */
+			writel(0x00040000, &xcvr->afe_tx_control);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+		}
+
+		if (is_a2(pdev) || is_b0(pdev))
+			/* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+			 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+			 * Enabled) ....(0xe800)
+			 */
+			writel(0x00004100, &xcvr->afe_xcvr_control0);
+		else if (is_c0(pdev))
+			writel(0x00014100, &xcvr->afe_xcvr_control0);
+		else if (is_c1(pdev))
+			writel(0x0001C100, &xcvr->afe_xcvr_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		/* Leave DFE/FFE on */
+		if (is_a2(pdev))
+			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
+		else if (is_b0(pdev)) {
+			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+			/* Enable TX equalization (0xe824) */
+			writel(0x00040000, &xcvr->afe_tx_control);
+		} else if (is_c0(pdev)) {
+			writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			/* Enable TX equalization (0xe824) */
+			writel(0x00040000, &xcvr->afe_tx_control);
+		} else if (is_c1(pdev)) {
+			writel(cable_length_long ? 0x01500C0C :
+			       cable_length_medium ? 0x01400C0D : 0x02400C0D,
+			       &xcvr->afe_xcvr_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			writel(cable_length_long ? 0x33091C1F :
+			       cable_length_medium ? 0x3315181F : 0x2B17161F,
+			       &xcvr->afe_rx_ssc_control0);
+			udelay(AFE_REGISTER_WRITE_DELAY);
+
+			/* Enable TX equalization (0xe824) */
+			writel(0x00040000, &xcvr->afe_tx_control);
+		}
+
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+
+		writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
+		udelay(AFE_REGISTER_WRITE_DELAY);
+	}
+
+	/* Transfer control to the PEs */
+	writel(0x00010f00, &afe->afe_dfx_master_control0);
+	udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+	sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+	memset(ihost->power_control.requesters, 0,
+	       sizeof(ihost->power_control.requesters));
+
+	ihost->power_control.phys_waiting = 0;
+	ihost->power_control.phys_granted_power = 0;
+}
+
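+/*
+ * Walk the controller from RESET to INITIALIZED: program the AFE, release
+ * the soft reset, wait for context RAM initialization, size the port, task
+ * context, and remote node tables against the reported silicon capacity, and
+ * initialize the phys, ports, and port configuration agent.
+ */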
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+	struct sci_base_state_machine *sm = &ihost->sm;
+	enum sci_status result = SCI_FAILURE;
+	unsigned long i, state, val;
+
+	if (ihost->sm.current_state_id != SCIC_RESET) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(sm, SCIC_INITIALIZING);
+
+	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+	ihost->next_phy_to_start = 0;
+	ihost->phy_startup_timer_pending = false;
+
+	sci_controller_initialize_power_control(ihost);
+
+	/*
+	 * There is nothing to do here for B0 since we do not have to
+	 * program the AFE registers.
+	 * @todo The AFE settings are supposed to be correct for B0 but
+	 *       presently they seem to be wrong. */
+	sci_controller_afe_initialization(ihost);
+
+
+	/* Take the hardware out of reset */
+	writel(0, &ihost->smu_registers->soft_reset_control);
+
+	/*
+	 * @todo Provide a meaningful error code for hardware failure:
+	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+	for (i = 100; i >= 1; i--) {
+		u32 status;
+
+		/* Loop until the hardware reports success */
+		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+		status = readl(&ihost->smu_registers->control_status);
+
+		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+			break;
+	}
+	if (i == 0)
+		goto out;
+
+	/*
+	 * Determine the actual device capacities that the
+	 * hardware will support */
+	val = readl(&ihost->smu_registers->device_context_capacity);
+
+	/* Record the smaller of the two capacity values */
+	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+	/*
+	 * Make all PEs that are unassigned match up with the
+	 * logical ports
+	 */
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct scu_port_task_scheduler_group_registers __iomem
+			*ptsg = &ihost->scu_registers->peg0.ptsg;
+
+		writel(i, &ptsg->protocol_engine[i]);
+	}
+
+	/* Initialize hardware PCI Relaxed ordering in DMA engines */
+	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+	/*
+	 * Initialize the PHYs before the PORTs because the PHY registers
+	 * are accessed during the port initialization.
+	 */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		result = sci_phy_initialize(&ihost->phys[i],
+					    &ihost->scu_registers->peg0.pe[i].tl,
+					    &ihost->scu_registers->peg0.pe[i].ll);
+		if (result != SCI_SUCCESS)
+			goto out;
+	}
+
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+
+		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+	}
+
+	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+	/* Advance the controller state machine */
+	if (result == SCI_SUCCESS)
+		state = SCIC_INITIALIZED;
+	else
+		state = SCIC_FAILED;
+	sci_change_state(sm, state);
+
+	return result;
+}
+
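+/*
+ * Allocate the coherent DMA regions shared with the silicon (completion
+ * queue, remote node context table, task context table, unsolicited frame
+ * buffers, and the per-tag request structures).  Allocations are
+ * device-managed (dmam_*) and performed only once; re-entry is a no-op.
+ */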
+static int sci_controller_dma_alloc(struct isci_host *ihost)
+{
+	struct device *dev = &ihost->pdev->dev;
+	size_t size;
+	int i;
+
+	/* detect re-initialization */
+	if (ihost->completion_queue)
+		return 0;
+
+	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+	ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+						      GFP_KERNEL);
+	if (!ihost->completion_queue)
+		return -ENOMEM;
+
+	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
+							       GFP_KERNEL);
+
+	if (!ihost->remote_node_context_table)
+		return -ENOMEM;
+
+	size = ihost->task_context_entries * sizeof(struct scu_task_context);
+	ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+							GFP_KERNEL);
+	if (!ihost->task_context_table)
+		return -ENOMEM;
+
+	size = SCI_UFI_TOTAL_SIZE;
+	ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+	if (!ihost->ufi_buf)
+		return -ENOMEM;
+
+	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+		struct isci_request *ireq;
+		dma_addr_t dma;
+
+		ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+		if (!ireq)
+			return -ENOMEM;
+
+		ireq->tc = &ihost->task_context_table[i];
+		ireq->owning_controller = ihost;
+		ireq->request_daddr = dma;
+		ireq->isci_host = ihost;
+		ihost->reqs[i] = ireq;
+	}
+
+	return 0;
+}
+
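+/*
+ * Program the silicon with the DMA addresses of the completion queue, remote
+ * node context table, task context table, and unsolicited frame area
+ * allocated by sci_controller_dma_alloc().
+ */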
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+	int err = sci_controller_dma_alloc(ihost);
+
+	if (err)
+		return err;
+
+	writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+	writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+	writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+	writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+	writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+	writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+	sci_unsolicited_frame_control_construct(ihost);
+
+	/*
+	 * Inform the silicon as to the location of the UF headers and
+	 * address table.
+	 */
+	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+		&ihost->scu_registers->sdma.uf_header_base_address_lower);
+	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+		&ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+		&ihost->scu_registers->sdma.uf_address_table_lower);
+	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+		&ihost->scu_registers->sdma.uf_address_table_upper);
+
+	return 0;
+}
+
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public facing objects (like asd_sas_port and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
+int isci_host_init(struct isci_host *ihost)
+{
+	int i, err;
+	enum sci_status status;
+
+	spin_lock_irq(&ihost->scic_lock);
+	status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+	spin_unlock_irq(&ihost->scic_lock);
+	if (status != SCI_SUCCESS) {
+		dev_err(&ihost->pdev->dev,
+			"%s: sci_controller_construct failed - status = %x\n",
+			__func__,
+			status);
+		return -ENODEV;
+	}
+
+	spin_lock_irq(&ihost->scic_lock);
+	status = sci_controller_initialize(ihost);
+	spin_unlock_irq(&ihost->scic_lock);
+	if (status != SCI_SUCCESS) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: sci_controller_initialize failed -"
+			 " status = 0x%x\n",
+			 __func__, status);
+		return -ENODEV;
+	}
+
+	err = sci_controller_mem_init(ihost);
+	if (err)
+		return err;
+
+	/* enable sgpio */
+	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
+	for (i = 0; i < isci_gpio_count(ihost); i++)
+		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
+
+	return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+			    struct isci_phy *iphy)
+{
+	switch (ihost->sm.current_state_id) {
+	case SCIC_STARTING:
+		sci_del_timer(&ihost->phy_timer);
+		ihost->phy_startup_timer_pending = false;
+		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+						  iport, iphy);
+		sci_controller_start_next_phy(ihost);
+		break;
+	case SCIC_READY:
+		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+						  iport, iphy);
+		break;
+	default:
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCIC Controller linkup event from phy %d in "
+			"unexpected state %d\n", __func__, iphy->phy_index,
+			ihost->sm.current_state_id);
+	}
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+			      struct isci_phy *iphy)
+{
+	switch (ihost->sm.current_state_id) {
+	case SCIC_STARTING:
+	case SCIC_READY:
+		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+						   iport, iphy);
+		break;
+	default:
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCIC Controller linkdown event from phy %d in "
+			"unexpected state %d\n",
+			__func__,
+			iphy->phy_index,
+			ihost->sm.current_state_id);
+	}
+}
+
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+	u32 index;
+
+	for (index = 0; index < ihost->remote_node_entries; index++) {
+		if ((ihost->device_table[index] != NULL) &&
+		   (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+			return true;
+	}
+
+	return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+					  struct isci_remote_device *idev)
+{
+	if (ihost->sm.current_state_id != SCIC_STOPPING) {
+		dev_dbg(&ihost->pdev->dev,
+			"SCIC Controller 0x%p remote device stopped event "
+			"from device 0x%p in unexpected state %d\n",
+			ihost, idev,
+			ihost->sm.current_state_id);
+		return;
+	}
+
+	if (!sci_controller_has_remote_devices_stopping(ihost))
+		isci_host_stop_complete(ihost);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+		__func__, ihost->id, request);
+
+	writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+	u16 task_index;
+	u16 task_sequence;
+
+	task_index = ISCI_TAG_TCI(io_tag);
+
+	if (task_index < ihost->task_context_entries) {
+		struct isci_request *ireq = ihost->reqs[task_index];
+
+		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+			task_sequence = ISCI_TAG_SEQ(io_tag);
+
+			if (task_sequence == ihost->io_request_sequence[task_index])
+				return ireq;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * sci_controller_allocate_remote_node_context() - This method allocates a
+ *    remote node index and reserves the remote node context space for use.
+ *    This method can fail if there are no more remote node indices available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * Returns SCI_SUCCESS if a remote node index was allocated, or
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES if none is available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+							    struct isci_remote_device *idev,
+							    u16 *node_id)
+{
+	u16 node_index;
+	u32 remote_node_count = sci_remote_device_node_count(idev);
+
+	node_index = sci_remote_node_table_allocate_remote_node(
+		&ihost->available_remote_nodes, remote_node_count
+		);
+
+	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+		ihost->device_table[node_index] = idev;
+
+		*node_id = node_index;
+
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+					     struct isci_remote_device *idev,
+					     u16 node_id)
+{
+	u32 remote_node_count = sci_remote_device_node_count(idev);
+
+	if (ihost->device_table[node_id] == idev) {
+		ihost->device_table[node_id] = NULL;
+
+		sci_remote_node_table_release_remote_node_index(
+			&ihost->available_remote_nodes, remote_node_count, node_id
+			);
+	}
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+				       void *frame_header,
+				       void *frame_buffer)
+{
+	/* XXX type safety? */
+	memcpy(response_buffer, frame_header, sizeof(u32));
+
+	memcpy(response_buffer + sizeof(u32),
+	       frame_buffer,
+	       sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+		writel(ihost->uf_control.get,
+			&ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
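+/*
+ * The task context index (tci) pool is a circular buffer: freed indices are
+ * pushed at the tail and allocations pop from the head, with both counters
+ * masked by SCI_MAX_IO_REQUESTS - 1.
+ */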
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+	ihost->tci_pool[tail] = tci;
+	ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+	u16 tci = ihost->tci_pool[head];
+
+	ihost->tci_head = head + 1;
+	return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
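+/*
+ * An io_tag pairs a tci with that slot's current sequence number via
+ * ISCI_TAG(seq, tci); the sequence number lets isci_free_tag() and
+ * sci_request_by_tag() reject stale tags once a slot has been reused.
+ */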
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+	if (isci_tci_space(ihost)) {
+		u16 tci = isci_tci_alloc(ihost);
+		u8 seq = ihost->io_request_sequence[tci];
+
+		return ISCI_TAG(seq, tci);
+	}
+
+	return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+	u16 tci = ISCI_TAG_TCI(io_tag);
+	u16 seq = ISCI_TAG_SEQ(io_tag);
+
+	/* prevent tail from passing head */
+	if (isci_tci_active(ihost) == 0)
+		return SCI_FAILURE_INVALID_IO_TAG;
+
+	if (seq == ihost->io_request_sequence[tci]) {
+		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+		isci_tci_free(ihost, tci);
+
+		return SCI_SUCCESS;
+	}
+	return SCI_FAILURE_INVALID_IO_TAG;
+}
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+					struct isci_remote_device *idev,
+					struct isci_request *ireq)
+{
+	enum sci_status status;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	status = sci_remote_device_start_io(ihost, idev, ireq);
+	if (status != SCI_SUCCESS)
+		return status;
+
+	set_bit(IREQ_ACTIVE, &ireq->flags);
+	sci_controller_post_request(ihost, ireq->post_context);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+						 struct isci_remote_device *idev,
+						 struct isci_request *ireq)
+{
+	/* terminate an ongoing (i.e. started) core IO request.  This does not
+	 * abort the IO request at the target, but rather removes the IO
+	 * request from the host controller.
+	 */
+	enum sci_status status;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+	status = sci_io_request_terminate(ireq);
+
+	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+		__func__, status, ireq, ireq->flags);
+
+	if ((status == SCI_SUCCESS) &&
+	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+		/* Utilize the original post context command and or in the
+		 * POST_TC_ABORT request sub-type.
+		 */
+		sci_controller_post_request(
+			ihost, ireq->post_context |
+				SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+	}
+	return status;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ *    completion operations for an IO request.  After this method is invoked,
+ *    the user should consider the IO request as invalid until it is properly
+ *    reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ *    IO request.
+ * @idev: The handle to the remote device object for which to complete
+ *    the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+					   struct isci_remote_device *idev,
+					   struct isci_request *ireq)
+{
+	enum sci_status status;
+	u16 index;
+
+	switch (ihost->sm.current_state_id) {
+	case SCIC_STOPPING:
+		/* XXX: Implement this function */
+		return SCI_FAILURE;
+	case SCIC_READY:
+		status = sci_remote_device_complete_io(ihost, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		index = ISCI_TAG_TCI(ireq->io_tag);
+		clear_bit(IREQ_ACTIVE, &ireq->flags);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	set_bit(IREQ_ACTIVE, &ireq->flags);
+	sci_controller_post_request(ihost, ireq->post_context);
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+					       struct isci_remote_device *idev,
+					       struct isci_request *ireq)
+{
+	enum sci_status status;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: SCIC Controller starting task from invalid "
+			 "state\n",
+			 __func__);
+		return SCI_TASK_FAILURE_INVALID_STATE;
+	}
+
+	status = sci_remote_device_start_task(ihost, idev, ireq);
+	switch (status) {
+	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+		set_bit(IREQ_ACTIVE, &ireq->flags);
+
+		/*
+		 * We will let the framework know this task request started
+		 * successfully, although the core is still working on starting
+		 * the request (to post the tc when the RNC is resumed).
+		 */
+		return SCI_SUCCESS;
+	case SCI_SUCCESS:
+		set_bit(IREQ_ACTIVE, &ireq->flags);
+		sci_controller_post_request(ihost, ireq->post_context);
+		break;
+	default:
+		break;
+	}
+
+	return status;
+}
+
+static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
+{
+	int d;
+
+	/* no support for TX_GP_CFG */
+	if (reg_index == 0)
+		return -EINVAL;
+
+	for (d = 0; d < isci_gpio_count(ihost); d++) {
+		u32 val = 0x444; /* all ODx.n clear */
+		int i;
+
+		for (i = 0; i < 3; i++) {
+			int bit = (i << 2) + 2;
+
+			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
+						       write_data, reg_index,
+						       reg_count);
+			if (bit < 0)
+				break;
+
+			/* if od is set, clear the 'invert' bit */
+			val &= ~(bit << ((i << 2) + 2));
+		}
+
+		if (i < 3)
+			break;
+		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
+	}
+
+	/* unless reg_index is > 1, we should always be able to write at
+	 * least one register
+	 */
+	return d > 0;
+}
+
+int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
+		    u8 reg_count, u8 *write_data)
+{
+	struct isci_host *ihost = sas_ha->lldd_ha;
+	int written;
+
+	switch (reg_type) {
+	case SAS_GPIO_REG_TX_GP:
+		written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
+		break;
+	default:
+		written = -EINVAL;
+	}
+
+	return written;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 0000000..22a9bb1
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,517 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include <scsi/sas_ata.h>
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+	/**
+	 * This field is set when the power control timer is running and cleared when
+	 * it is not.
+	 */
+	bool timer_started;
+
+	/**
+	 * Timer to control when the directed attached disks can consume power.
+	 */
+	struct sci_timer timer;
+
+	/**
+	 * This field is used to keep track of how many phys are put into the
+	 * requesters field.
+	 */
+	u8 phys_waiting;
+
+	/**
+	 * This field is used to keep track of how many phys have been granted to consume power
+	 */
+	u8 phys_granted_power;
+
+	/**
+	 * This field is an array of phys that we are waiting on. The phys are direct
+	 * mapped into requesters via struct sci_phy.phy_index
+	 */
+	struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+			       struct sci_port_configuration_agent *,
+			       struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
+
+struct sci_port_configuration_agent {
+	u16 phy_configured_mask;
+	u16 phy_ready_mask;
+	struct {
+		u8 min_index;
+		u8 max_index;
+	} phy_valid_port_range[SCI_MAX_PHYS];
+	bool timer_pending;
+	port_config_fn link_up_handler;
+	port_config_fn link_down_handler;
+	struct sci_timer	timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ * 		      the phy index is set so further notifications are not
+ * 		      made.  Once the phy reports link up and is made part of a
+ * 		      port then this bit is cleared.
+ *
+ */
+struct isci_host {
+	struct sci_base_state_machine sm;
+	/* XXX can we time this externally */
+	struct sci_timer timer;
+	/* XXX drop reference module params directly */
+	struct sci_user_parameters user_parameters;
+	/* XXX no need to be a union */
+	struct sci_oem_params oem_parameters;
+	struct sci_port_configuration_agent port_agent;
+	struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+	struct sci_remote_node_table available_remote_nodes;
+	struct sci_power_control power_control;
+	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+	struct scu_task_context *task_context_table;
+	dma_addr_t tc_dma;
+	union scu_remote_node_context *remote_node_context_table;
+	dma_addr_t rnc_dma;
+	u32 *completion_queue;
+	dma_addr_t cq_dma;
+	u32 completion_queue_get;
+	u32 logical_port_entries;
+	u32 remote_node_entries;
+	u32 task_context_entries;
+	void *ufi_buf;
+	dma_addr_t ufi_dma;
+	struct sci_unsolicited_frame_control uf_control;
+
+	/* phy startup */
+	struct sci_timer phy_timer;
+	/* XXX kill */
+	bool phy_startup_timer_pending;
+	u32 next_phy_to_start;
+	/* XXX convert to unsigned long and use bitops */
+	u8 invalid_phy_mask;
+
+	/* TODO attempt dynamic interrupt coalescing scheme */
+	u16 interrupt_coalesce_number;
+	u32 interrupt_coalesce_timeout;
+	struct smu_registers __iomem *smu_registers;
+	struct scu_registers __iomem *scu_registers;
+
+	u16 tci_head;
+	u16 tci_tail;
+	u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+	int id; /* unique within a given pci device */
+	struct isci_phy phys[SCI_MAX_PHYS];
+	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
+	struct sas_ha_struct sas_ha;
+
+	struct pci_dev *pdev;
+	#define IHOST_START_PENDING 0
+	#define IHOST_STOP_PENDING 1
+	#define IHOST_IRQ_ENABLED 2
+	unsigned long flags;
+	wait_queue_head_t eventq;
+	struct tasklet_struct completion_tasklet;
+	spinlock_t scic_lock;
+	struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ *    for the common controller state machine.
+ */
+enum sci_controller_states {
+	/**
+	 * Simply the initial state for the base controller state machine.
+	 */
+	SCIC_INITIAL = 0,
+
+	/**
+	 * This state indicates that the controller is reset.  The memory for
+	 * the controller is in its initial state, but the controller requires
+	 * initialization.
+	 * This state is entered from the INITIAL state.
+	 * This state is entered from the RESETTING state.
+	 */
+	SCIC_RESET,
+
+	/**
+	 * This state is typically an action state that indicates the controller
+	 * is in the process of initialization.  In this state no new IO operations
+	 * are permitted.
+	 * This state is entered from the RESET state.
+	 */
+	SCIC_INITIALIZING,
+
+	/**
+	 * This state indicates that the controller has been successfully
+	 * initialized.  In this state no new IO operations are permitted.
+	 * This state is entered from the INITIALIZING state.
+	 */
+	SCIC_INITIALIZED,
+
+	/**
+	 * This state indicates that the controller is in the process of becoming
+	 * ready (i.e. starting).  In this state no new IO operations are permitted.
+	 * This state is entered from the INITIALIZED state.
+	 */
+	SCIC_STARTING,
+
+	/**
+	 * This state indicates the controller is now ready.  Thus, the user
+	 * is able to perform IO operations on the controller.
+	 * This state is entered from the STARTING state.
+	 */
+	SCIC_READY,
+
+	/**
+	 * This state is typically an action state that indicates the controller
+	 * is in the process of resetting.  Thus, the user is unable to perform
+	 * IO operations on the controller.  A reset is considered destructive in
+	 * most cases.
+	 * This state is entered from the READY state.
+	 * This state is entered from the FAILED state.
+	 * This state is entered from the STOPPED state.
+	 */
+	SCIC_RESETTING,
+
+	/**
+	 * This state indicates that the controller is in the process of stopping.
+	 * In this state no new IO operations are permitted, but existing IO
+	 * operations are allowed to complete.
+	 * This state is entered from the READY state.
+	 */
+	SCIC_STOPPING,
+
+	/**
+	 * This state indicates that the controller could not successfully be
+	 * initialized.  In this state no new IO operations are permitted.
+	 * This state is entered from the INITIALIZING state.
+	 * This state is entered from the STARTING state.
+	 * This state is entered from the STOPPING state.
+	 * This state is entered from the RESETTING state.
+	 */
+	SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This class represents the pci function containing the
+ *    controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ *    the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+	struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+	struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+	return pci_get_drvdata(pdev);
+}
+
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+	return ihost->sas_ha.core.shost;
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+	for (id = 0; id < SCI_MAX_CONTROLLERS && \
+	     (ihost = to_pci_info(pdev)->hosts[id]); id++)
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+	return dev->port->ha->lldd_ha;
+}
+
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+	return dev_to_ihost(idev->domain_dev);
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
+
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+	struct domain_device *dev = idev->domain_dev;
+
+	if (dev_is_sata(dev) && dev->parent)
+		return SCU_STP_REMOTE_NODE_COUNT;
+	return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object.  This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+		return NULL;
+
+	return &idev->isci_port->isci_host->pdev->dev;
+}
+
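+/*
+ * Map the PCI revision id to the silicon stepping: revisions below 4 are A2,
+ * revision 4 is B0, revision 5 is C0, and revisions 6 and above are C1.
+ */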
+static inline bool is_a2(struct pci_dev *pdev)
+{
+	if (pdev->revision < 4)
+		return true;
+	return false;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+	if (pdev->revision == 4)
+		return true;
+	return false;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+	if (pdev->revision == 5)
+		return true;
+	return false;
+}
+
+static inline bool is_c1(struct pci_dev *pdev)
+{
+	if (pdev->revision >= 6)
+		return true;
+	return false;
+}
+
+enum cable_selections {
+	short_cable     = 0,
+	long_cable      = 1,
+	medium_cable    = 2,
+	undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+	return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
+/* set hw control for 'activity', even though active enclosures seem to drive
+ * the activity led on their own.  Skip setting FSENG control on 'status' due
+ * to unexpected operation and 'error' due to not being a supported automatic
+ * FSENG output
+ */
+#define SGPIO_HW_CONTROL 0x00000443
+
+static inline int isci_gpio_count(struct isci_host *ihost)
+{
+	return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+				      u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+				       u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+					    void *frame_header,
+					    void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+								 struct isci_remote_device *idev,
+								 u16 *node_id);
+void sci_controller_free_remote_node_context(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+			    struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+			      struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+					  struct isci_remote_device *idev);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
+
+int isci_host_init(struct isci_host *);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
+
+enum sci_status sci_controller_start_io(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+	struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+	struct isci_host *ihost,
+	struct sci_port_configuration_agent *port_agent);
+
+int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
+		    u8 reg_count, u8 *write_data);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 0000000..77128d6
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,809 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include <scsi/scsi_host.h>
+#include "host.h"
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+#define MAJ 1
+#define MIN 2
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+	__stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
+static struct scsi_transport_template *isci_transport_template;
+
+static const struct pci_device_id isci_id_table[] = {
+	{ PCI_VDEVICE(INTEL, 0x1D61),},
+	{ PCI_VDEVICE(INTEL, 0x1D63),},
+	{ PCI_VDEVICE(INTEL, 0x1D65),},
+	{ PCI_VDEVICE(INTEL, 0x1D67),},
+	{ PCI_VDEVICE(INTEL, 0x1D69),},
+	{ PCI_VDEVICE(INTEL, 0x1D6B),},
+	{ PCI_VDEVICE(INTEL, 0x1D60),},
+	{ PCI_VDEVICE(INTEL, 0x1D62),},
+	{ PCI_VDEVICE(INTEL, 0x1D64),},
+	{ PCI_VDEVICE(INTEL, 0x1D66),},
+	{ PCI_VDEVICE(INTEL, 0x1D68),},
+	{ PCI_VDEVICE(INTEL, 0x1D6A),},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 2;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+		 "This field indicates the length of the SAS/SATA cable between "
+		 "host and device. If any bits > 15 are set (the default), "
+		 "platform defaults are used");
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+	&dev_attr_isci_id,
+	NULL
+};
+
+static struct scsi_host_template isci_sht = {
+
+	.module				= THIS_MODULE,
+	.name				= DRV_NAME,
+	.proc_name			= DRV_NAME,
+	.queuecommand			= sas_queuecommand,
+	.target_alloc			= sas_target_alloc,
+	.slave_configure		= sas_slave_configure,
+	.scan_finished			= isci_host_scan_finished,
+	.scan_start			= isci_host_start,
+	.change_queue_depth		= sas_change_queue_depth,
+	.bios_param			= sas_bios_param,
+	.can_queue			= ISCI_CAN_QUEUE_VAL,
+	.this_id			= -1,
+	.sg_tablesize			= SG_ALL,
+	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.eh_abort_handler		= sas_eh_abort_handler,
+	.eh_device_reset_handler        = sas_eh_device_reset_handler,
+	.eh_bus_reset_handler           = sas_eh_bus_reset_handler,
+	.target_destroy			= sas_target_destroy,
+	.ioctl				= sas_ioctl,
+	.shost_attrs			= isci_host_attrs,
+	.track_queue_depth		= 1,
+};
+
+static struct sas_domain_function_template isci_transport_ops  = {
+
+	/* The class calls these to notify the LLDD of an event. */
+	.lldd_port_formed	= isci_port_formed,
+	.lldd_port_deformed	= isci_port_deformed,
+
+	/* The class calls these when a device is found or gone. */
+	.lldd_dev_found		= isci_remote_device_found,
+	.lldd_dev_gone		= isci_remote_device_gone,
+
+	.lldd_execute_task	= isci_task_execute_task,
+	/* Task Management Functions. Must be called from process context. */
+	.lldd_abort_task	= isci_task_abort_task,
+	.lldd_abort_task_set	= isci_task_abort_task_set,
+	.lldd_clear_aca		= isci_task_clear_aca,
+	.lldd_clear_task_set	= isci_task_clear_task_set,
+	.lldd_I_T_nexus_reset	= isci_task_I_T_nexus_reset,
+	.lldd_lu_reset		= isci_task_lu_reset,
+	.lldd_query_task	= isci_task_query_task,
+
+	/* ata recovery called from ata-eh */
+	.lldd_ata_check_ready	= isci_ata_check_ready,
+
+	/* Port and Adapter management */
+	.lldd_clear_nexus_port	= isci_task_clear_nexus_port,
+	.lldd_clear_nexus_ha	= isci_task_clear_nexus_ha,
+
+	/* Phy management */
+	.lldd_control_phy	= isci_phy_control,
+
+	/* GPIO support */
+	.lldd_write_gpio	= isci_gpio_write,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D  M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ *    specific members of the sas_ha struct and calls the libsas
+ *    sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ *    libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The user
+ * should check for a possible memory allocation error return; otherwise,
+ * zero indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+	int i;
+	struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+	struct asd_sas_phy **sas_phys;
+	struct asd_sas_port **sas_ports;
+
+	sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+				SCI_MAX_PHYS * sizeof(void *),
+				GFP_KERNEL);
+	if (!sas_phys)
+		return -ENOMEM;
+
+	sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+				 SCI_MAX_PORTS * sizeof(void *),
+				 GFP_KERNEL);
+	if (!sas_ports)
+		return -ENOMEM;
+
+	sas_ha->sas_ha_name = DRV_NAME;
+	sas_ha->lldd_module = THIS_MODULE;
+	sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		sas_phys[i] = &isci_host->phys[i].sas_phy;
+		sas_ports[i] = &isci_host->sas_ports[i];
+	}
+
+	sas_ha->sas_phy  = sas_phys;
+	sas_ha->sas_port = sas_ports;
+	sas_ha->num_phys = SCI_MAX_PHYS;
+
+	sas_ha->strict_wide_ports = 1;
+
+	return sas_register_ha(sas_ha);
+}
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+	struct Scsi_Host *shost;
+
+	if (!isci_host)
+		return;
+
+	shost = to_shost(isci_host);
+	scsi_remove_host(shost);
+	sas_unregister_ha(&isci_host->sas_ha);
+
+	sas_remove_host(shost);
+	scsi_host_put(shost);
+}
+
+static int isci_pci_init(struct pci_dev *pdev)
+{
+	int err, bar_num, bar_mask = 0;
+	void __iomem * const *iomap;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"failed enable PCI device %s!\n",
+			pci_name(pdev));
+		return err;
+	}
+
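+	/* The (presumably 64-bit) BARs each occupy two PCI BAR slots, hence
+	 * the index doubling below: with SCI_PCI_BAR_COUNT == 2 the mask
+	 * works out to 0x5 (BARs 0 and 2).
+	 */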
+	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+		bar_mask |= 1 << (bar_num * 2);
+
+	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+	if (err)
+		return err;
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err)
+			return err;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
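+/* With the constants from isci.h, the BAR-size check below means: a
+ * single-controller part exposes a 16K SMU BAR and a 4M SCU BAR, while a
+ * dual-controller part exposes at least 32K and 8M respectively.
+ */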
+static int num_controllers(struct pci_dev *pdev)
+{
+	/* BAR size alone can tell us whether we are running with a dual
+	 * controller part; no need to trust revision IDs that might be under
+	 * broken firmware control.
+	 */
+	resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+	resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+	if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+	    smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+		return SCI_MAX_CONTROLLERS;
+	else
+		return 1;
+}
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+	int err, i, num_msix;
+	struct isci_host *ihost;
+	struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+	/*
+	 *  Determine the number of vectors associated with this
+	 *  PCI function.
+	 */
+	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+	for (i = 0; i < num_msix; i++)
+		pci_info->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
+	if (err)
+		goto intx;
+
+	for (i = 0; i < num_msix; i++) {
+		int id = i / SCI_NUM_MSI_X_INT;
+		struct msix_entry *msix = &pci_info->msix_entries[i];
+		irq_handler_t isr;
+
+		ihost = pci_info->hosts[id];
+		/* odd numbered vectors are error interrupts */
+		if (i & 1)
+			isr = isci_error_isr;
+		else
+			isr = isci_msix_isr;
+
+		err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+				       DRV_NAME"-msix", ihost);
+		if (!err)
+			continue;
+
+		dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+		while (i--) {
+			id = i / SCI_NUM_MSI_X_INT;
+			ihost = pci_info->hosts[id];
+			msix = &pci_info->msix_entries[i];
+			devm_free_irq(&pdev->dev, msix->vector, ihost);
+		}
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+	return 0;
+
+ intx:
+	for_each_isci_host(i, ihost, pdev) {
+		err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+				       IRQF_SHARED, DRV_NAME"-intx", ihost);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct sci_phy_user_params *u_phy = &u->phys[i];
+
+		u_phy->max_speed_generation = phy_gen;
+
+		/* we are not exporting these for now */
+		u_phy->align_insertion_frequency = 0x7f;
+		u_phy->in_connection_align_insertion_frequency = 0xff;
+		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+	}
+
+	u->stp_inactivity_timeout = stp_inactive_to;
+	u->ssp_inactivity_timeout = ssp_inactive_to;
+	u->stp_max_occupancy_timeout = stp_max_occ_to;
+	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+	u->no_outbound_task_timeout = no_outbound_task_to;
+	u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+					       struct sci_user_parameters *sci_parms)
+{
+	u16 index;
+
+	/*
+	 * Validate the user parameters.  If they are not legal, then
+	 * return a failure.
+	 */
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		struct sci_phy_user_params *u;
+
+		u = &sci_parms->phys[index];
+
+		if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+		      (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+		if ((u->in_connection_align_insertion_frequency < 3) ||
+		    (u->align_insertion_frequency == 0) ||
+		    (u->notify_enable_spin_up_insertion_frequency == 0))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	}
+
+	if ((sci_parms->stp_inactivity_timeout == 0) ||
+	    (sci_parms->ssp_inactivity_timeout == 0) ||
+	    (sci_parms->stp_max_occupancy_timeout == 0) ||
+	    (sci_parms->ssp_max_occupancy_timeout == 0) ||
+	    (sci_parms->no_outbound_task_timeout == 0))
+		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+	memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+	return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+	/* these defaults are overridden by the platform / firmware */
+	struct sci_user_parameters *user = &ihost->user_parameters;
+	struct sci_oem_params *oem = &ihost->oem_parameters;
+	int i;
+
+	/* Default to APC mode. */
+	oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to a single concurrent spin-up. */
+	oem->controller.max_concurr_spin_up = 1;
+
+	/* Default to no SSC operation. */
+	oem->controller.do_enable_ssc = false;
+
+	/* Default to short cables on all phys. */
+	oem->controller.cable_selection_mask = 0;
+
+	/* Initialize all of the port parameter information to narrow ports. */
+	for (i = 0; i < SCI_MAX_PORTS; i++)
+		oem->ports[i].phy_mask = 0;
+
+	/* Initialize all of the phy parameter information. */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		/* Default to 3G (i.e. Gen 2). */
+		user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+		/* the frequencies cannot be 0 */
+		user->phys[i].align_insertion_frequency = 0x7f;
+		user->phys[i].in_connection_align_insertion_frequency = 0xff;
+		user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/* Previous Vitesse-based expanders had an arbitration issue
+		 * that is worked around by making the upper 32 bits of the
+		 * SAS address greater than the Vitesse company identifier.
+		 * Hence the use of 0x5FCFFFFF.
+		 */
+		oem->phys[i].sas_address.low = 0x1 + ihost->id;
+		oem->phys[i].sas_address.high = 0x5FCFFFFF;
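+		/* e.g. with these defaults every phy on controller 0
+		 * reports SAS address 0x5FCFFFFF00000001.
+		 */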
+	}
+
+	user->stp_inactivity_timeout = 5;
+	user->ssp_inactivity_timeout = 5;
+	user->stp_max_occupancy_timeout = 5;
+	user->ssp_max_occupancy_timeout = 20;
+	user->no_outbound_task_timeout = 2;
+}
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+	struct isci_orom *orom = to_pci_info(pdev)->orom;
+	struct sci_user_parameters sci_user_params;
+	u8 oem_version = ISCI_ROM_VER_1_0;
+	struct isci_host *ihost;
+	struct Scsi_Host *shost;
+	int err, i;
+
+	ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+	if (!ihost)
+		return NULL;
+
+	ihost->pdev = pdev;
+	ihost->id = id;
+	spin_lock_init(&ihost->scic_lock);
+	init_waitqueue_head(&ihost->eventq);
+	ihost->sas_ha.dev = &ihost->pdev->dev;
+	ihost->sas_ha.lldd_ha = ihost;
+	tasklet_init(&ihost->completion_tasklet,
+		     isci_host_completion_routine, (unsigned long)ihost);
+
+	/* validate module parameters */
+	/* TODO: kill struct sci_user_parameters and reference directly */
+	sci_oem_defaults(ihost);
+	isci_user_parameters_get(&sci_user_params);
+	if (sci_user_parameters_set(ihost, &sci_user_params)) {
+		dev_warn(&pdev->dev,
+			 "%s: sci_user_parameters_set failed\n", __func__);
+		return NULL;
+	}
+
+	/* sanity check platform (or 'firmware') oem parameters */
+	if (orom) {
+		if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+			dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+			return NULL;
+		}
+		ihost->oem_parameters = orom->ctrl[id];
+		oem_version = orom->hdr.version;
+	}
+
+	/* validate oem parameters (platform, firmware, or built-in defaults) */
+	if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+		dev_warn(&pdev->dev, "oem parameter validation failed\n");
+		return NULL;
+	}
+
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+
+		INIT_LIST_HEAD(&iport->remote_dev_list);
+		iport->isci_host = ihost;
+	}
+
+	for (i = 0; i < SCI_MAX_PHYS; i++)
+		isci_phy_init(&ihost->phys[i], ihost, i);
+
+	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+		struct isci_remote_device *idev = &ihost->devices[i];
+
+		INIT_LIST_HEAD(&idev->node);
+	}
+
+	shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+	if (!shost)
+		return NULL;
+
+	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+		 "{%s, %s, %s, %s}\n",
+		 (is_cable_select_overridden() ? "* " : ""), ihost->id,
+		 lookup_cable_names(decode_cable_selection(ihost, 3)),
+		 lookup_cable_names(decode_cable_selection(ihost, 2)),
+		 lookup_cable_names(decode_cable_selection(ihost, 1)),
+		 lookup_cable_names(decode_cable_selection(ihost, 0)));
+
+	err = isci_host_init(ihost);
+	if (err)
+		goto err_shost;
+
+	SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+	ihost->sas_ha.core.shost = shost;
+	shost->transportt = isci_transport_template;
+
+	shost->max_id = ~0;
+	shost->max_lun = ~0;
+	shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+	err = scsi_add_host(shost, &pdev->dev);
+	if (err)
+		goto err_shost;
+
+	err = isci_register_sas_ha(ihost);
+	if (err)
+		goto err_shost_remove;
+
+	return ihost;
+
+ err_shost_remove:
+	scsi_remove_host(shost);
+ err_shost:
+	scsi_host_put(shost);
+
+	return NULL;
+}
+
+static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct isci_pci_info *pci_info;
+	int err, i;
+	struct isci_host *isci_host;
+	const struct firmware *fw = NULL;
+	struct isci_orom *orom = NULL;
+	char *source = "(platform)";
+
+	dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+		 pdev->revision);
+
+	pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+	if (!pci_info)
+		return -ENOMEM;
+	pci_set_drvdata(pdev, pci_info);
+
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		orom = isci_get_efi_var(pdev);
+
+	if (!orom)
+		orom = isci_request_oprom(pdev);
+
+	for (i = 0; orom && i < num_controllers(pdev); i++) {
+		if (sci_oem_parameters_validate(&orom->ctrl[i],
+						orom->hdr.version)) {
+			dev_warn(&pdev->dev,
+				 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+			orom = NULL;
+			break;
+		}
+	}
+
+	if (!orom) {
+		source = "(firmware)";
+		orom = isci_request_firmware(pdev, fw);
+		if (!orom) {
+			/* TODO convert this to WARN_TAINT_ONCE once the
+			 * orom/efi parameter support is widely available
+			 */
+			dev_warn(&pdev->dev,
+				 "Loading user firmware failed, using default values\n");
+			dev_warn(&pdev->dev,
+				 "Default OEM configuration being used: 4 narrow ports, and default SAS Addresses\n");
+		}
+	}
+
+	if (orom)
+		dev_info(&pdev->dev,
+			 "OEM SAS parameters (version: %u.%u) loaded %s\n",
+			 (orom->hdr.version & 0xf0) >> 4,
+			 (orom->hdr.version & 0xf), source);
+
+	pci_info->orom = orom;
+
+	err = isci_pci_init(pdev);
+	if (err)
+		return err;
+
+	for (i = 0; i < num_controllers(pdev); i++) {
+		struct isci_host *h = isci_host_alloc(pdev, i);
+
+		if (!h) {
+			err = -ENOMEM;
+			goto err_host_alloc;
+		}
+		pci_info->hosts[i] = h;
+
+		/* turn on DIF support */
+		scsi_host_set_prot(to_shost(h),
+				   SHOST_DIF_TYPE1_PROTECTION |
+				   SHOST_DIF_TYPE2_PROTECTION |
+				   SHOST_DIF_TYPE3_PROTECTION);
+		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
+	}
+
+	err = isci_setup_interrupts(pdev);
+	if (err)
+		goto err_host_alloc;
+
+	for_each_isci_host(i, isci_host, pdev)
+		scsi_scan_host(to_shost(isci_host));
+
+	return 0;
+
+ err_host_alloc:
+	for_each_isci_host(i, isci_host, pdev)
+		isci_unregister(isci_host);
+	return err;
+}
+
+static void isci_pci_remove(struct pci_dev *pdev)
+{
+	struct isci_host *ihost;
+	int i;
+
+	for_each_isci_host(i, ihost, pdev) {
+		wait_for_start(ihost);
+		isci_unregister(ihost);
+		isci_host_deinit(ihost);
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int isci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct isci_host *ihost;
+	int i;
+
+	for_each_isci_host(i, ihost, pdev) {
+		sas_suspend_ha(&ihost->sas_ha);
+		isci_host_deinit(ihost);
+	}
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int isci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct isci_host *ihost;
+	int rc, i;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"enabling device failure after resume(%d)\n", rc);
+		return rc;
+	}
+
+	pci_set_master(pdev);
+
+	for_each_isci_host(i, ihost, pdev) {
+		sas_prep_resume_ha(&ihost->sas_ha);
+
+		isci_host_init(ihost);
+		isci_host_start(ihost->sas_ha.core.shost);
+		wait_for_start(ihost);
+
+		sas_resume_ha(&ihost->sas_ha);
+	}
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
+
+static struct pci_driver isci_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= isci_id_table,
+	.probe		= isci_pci_probe,
+	.remove		= isci_pci_remove,
+	.driver.pm      = &isci_pm_ops,
+};
+
+static __init int isci_init(void)
+{
+	int err;
+
+	pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+		DRV_NAME, DRV_VERSION);
+
+	isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+	if (!isci_transport_template)
+		return -ENOMEM;
+
+	err = pci_register_driver(&isci_pci_driver);
+	if (err)
+		sas_release_transport(isci_transport_template);
+
+	return err;
+}
+
+static __exit void isci_exit(void)
+{
+	pci_unregister_driver(&isci_pci_driver);
+	sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 0000000..234ab46
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,539 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR       0
+#define SCI_SMU_BAR_SIZE  (16*1024)
+#define SCI_SCU_BAR       1
+#define SCI_SCU_BAR_SIZE  (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS  (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS  (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES  (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS  SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS    (384)
+#define SCU_MAX_EVENTS_SHIFT		  (7)
+#define SCU_MAX_EVENTS                    (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES        (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH  (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES  (SCU_MAX_CRITICAL_NOTIFICATIONS \
+					   + SCU_MAX_EVENTS \
+					   + SCU_MAX_UNSOLICITED_FRAMES	\
+					   + SCI_MAX_IO_REQUESTS \
+					   + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT	  (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
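+/* With the values above: 384 + 128 + 128 + 256 + 128 = 1024 entries, so
+ * SCU_MAX_COMPLETION_QUEUE_SHIFT evaluates to 10.
+ */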
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024U)
+#define SCU_INVALID_FRAME_INDEX             (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH  (0x00FFFFFF)
+
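+/* Compile-time sanity checks on the sizing constants above; the
+ * BUILD_BUG_ON*() macros below generate no object code.
+ */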
+static inline void check_sizes(void)
+{
+	BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+	BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+	BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ *    non-task management related SCI interface methods.
+ *
+ *
+ */
+enum sci_status {
+	/**
+	 * This member indicates successful completion.
+	 */
+	SCI_SUCCESS = 0,
+
+	/**
+	 * This value indicates that the calling method completed successfully,
+	 * but that the IO may have completed before having its start method
+	 * invoked.  This occurs during SAT translation for requests that do
+	 * not require an IO to the target or for any other requests that may
+	 * be completed without having to submit IO.
+	 */
+	SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+	/**
+	 *  This value indicates that the SCU hardware returned an early response
+	 *  because the IO request specified more data than is returned by the
+	 *  target device (mode pages, inquiry data, etc.). The completion routine
+	 *  will handle this case to get the actual number of bytes transferred.
+	 */
+	SCI_SUCCESS_IO_DONE_EARLY,
+
+	/**
+	 * This member indicates that the object for which a state change is
+	 * being requested is already in said state.
+	 */
+	SCI_WARNING_ALREADY_IN_STATE,
+
+	/**
+	 * This member indicates that the interrupt coalescence timer may cause SAS
+	 * specification compliance issues (i.e. SMP target mode response
+	 * frames must be returned within 1.9 milliseconds).
+	 */
+	SCI_WARNING_TIMER_CONFLICT,
+
+	/**
+	 * This field indicates that a sequence of actions is not yet complete.
+	 * Mostly, this status is used when multiple ATA commands are needed in
+	 * a SATI translation.
+	 */
+	SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+	/**
+	 * This member indicates that there was a general failure.
+	 */
+	SCI_FAILURE,
+
+	/**
+	 * This member indicates that the SCI implementation is unable to complete
+	 * an operation due to a critical flaw that prevents any further operation
+	 * (e.g. an invalid pointer).
+	 */
+	SCI_FATAL_ERROR,
+
+	/**
+	 * This member indicates the calling function failed, because the state
+	 * of the controller is in a state that prevents successful completion.
+	 */
+	SCI_FAILURE_INVALID_STATE,
+
+	/**
+	 * This member indicates the calling function failed, because there is
+	 * insufficient resources/memory to complete the request.
+	 */
+	SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * controller object required for the operation can't be located.
+	 */
+	SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * discovered controller type is not supported by the library.
+	 */
+	SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * requested initialization data version isn't supported.
+	 */
+	SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * requested configuration of SAS Phys into SAS Ports is not supported.
+	 */
+	SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * requested protocol is not supported by the remote device, port,
+	 * or controller.
+	 */
+	SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * requested information type is not supported by the SCI implementation.
+	 */
+	SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * device already exists.
+	 */
+	SCI_FAILURE_DEVICE_EXISTS,
+
+	/**
+	 * This member indicates the calling function failed, because adding
+	 * a phy to the object is not possible.
+	 */
+	SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+	/**
+	 * This member indicates the calling function failed, because the
+	 * requested information type is not supported by the SCI implementation.
+	 */
+	SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+	/**
+	 * This member indicates the calling function failed, because the SCI
+	 * implementation does not support the supplied time limit.
+	 */
+	SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+	/**
+	 * This member indicates the calling method failed, because the SCI
+	 * implementation does not contain the specified Phy.
+	 */
+	SCI_FAILURE_INVALID_PHY,
+
+	/**
+	 * This member indicates the calling method failed, because the SCI
+	 * implementation does not contain the specified Port.
+	 */
+	SCI_FAILURE_INVALID_PORT,
+
+	/**
+	 * This member indicates the calling method was partly successful:
+	 * the port was reset, but not all phys in the port are operational.
+	 */
+	SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+	/**
+	 * This member indicates that the calling method failed: the port
+	 * reset did not complete because none of the phys are operational.
+	 */
+	SCI_FAILURE_RESET_PORT_FAILURE,
+
+	/**
+	 * This member indicates the calling method failed, because the SCI
+	 * implementation does not contain the specified remote device.
+	 */
+	SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+	/**
+	 * This member indicates the calling method failed, because the remote
+	 * device is in a bad state and requires a reset.
+	 */
+	SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+	/**
+	 * This member indicates the calling method failed, because the SCI
+	 * implementation does not contain or support the specified IO tag.
+	 */
+	SCI_FAILURE_INVALID_IO_TAG,
+
+	/**
+	 * This member indicates that the operation failed and the user should
+	 * check the response data associated with the IO.
+	 */
+	SCI_FAILURE_IO_RESPONSE_VALID,
+
+	/**
+	 * This member indicates that the operation failed, the failure is
+	 * controller implementation specific, and the response data associated
+	 * with the request is not valid.  You can query for the controller
+	 * specific error information via sci_controller_get_request_status()
+	 */
+	SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+	/**
+	 * This member indicates that the operation failed because the
+	 * user requested this IO to be terminated.
+	 */
+	SCI_FAILURE_IO_TERMINATED,
+
+	/**
+	 * This member indicates that the operation failed and the associated
+	 * request requires a SCSI abort task to be sent to the target.
+	 */
+	SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+	/**
+	 * This member indicates that the operation failed because the supplied
+	 * device could not be located.
+	 */
+	SCI_FAILURE_DEVICE_NOT_FOUND,
+
+	/**
+	 * This member indicates that the operation failed because the
+	 * object's association is required and is not correctly set.
+	 */
+	SCI_FAILURE_INVALID_ASSOCIATION,
+
+	/**
+	 * This member indicates that the operation failed, because a timeout
+	 * occurred.
+	 */
+	SCI_FAILURE_TIMEOUT,
+
+	/**
+	 * This member indicates that the operation failed, because the user
+	 * specified a value that is either invalid or not supported.
+	 */
+	SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+	/**
+	 * This value indicates that the operation failed, because the number
+	 * of messages (MSI-X) is not supported.
+	 */
+	SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+	/**
+	 * This value indicates that the method failed due to a lack of
+	 * available NCQ tags.
+	 */
+	SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+	/**
+	 * This value indicates that a protocol violation has occurred on the
+	 * link.
+	 */
+	SCI_FAILURE_PROTOCOL_VIOLATION,
+
+	/**
+	 * This value indicates a failure condition that retry may help to clear.
+	 */
+	SCI_FAILURE_RETRY_REQUIRED,
+
+	/**
+	 * This field indicates that the retry limit was reached when a retry was attempted.
+	 */
+	SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+	/**
+	 * This member indicates the calling method was partly successful.
+	 * Mostly, this status is used when a LUN_RESET issued to an expander attached
+	 * STP device in READY NCQ substate needs to have RNC suspended/resumed
+	 * before posting TC.
+	 */
+	SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+	/**
+	 * This field indicates an illegal phy connection based on the routing
+	 * attributes of the two expander phys attached to each other.
+	 */
+	SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+	/**
+	 * This field indicates that a CONFIG ROUTE INFO command received a
+	 * response with function result INDEX DOES NOT EXIST, which usually
+	 * means the maximum route index was exceeded.
+	 */
+	SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+	/**
+	 * This value indicates that an unsupported PCI device ID has been
+	 * specified.  This indicates that attempts to invoke
+	 * sci_library_allocate_controller() will fail.
+	 */
+	SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: Add the API to retrieve the SCU status from the core.  Check that
+ * the following statuses are properly handled:
+ * - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+	SCI_IO_SUCCESS                         = SCI_SUCCESS,
+	SCI_IO_FAILURE                         = SCI_FAILURE,
+	SCI_IO_SUCCESS_COMPLETE_BEFORE_START   = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+	SCI_IO_SUCCESS_IO_DONE_EARLY           = SCI_SUCCESS_IO_DONE_EARLY,
+	SCI_IO_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+	SCI_IO_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+	SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+	SCI_IO_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+	SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+	SCI_IO_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+	SCI_IO_FAILURE_REQUIRES_SCSI_ABORT     = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+	SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+	SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE    = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+	SCI_IO_FAILURE_PROTOCOL_VIOLATION      = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+	SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+	SCI_IO_FAILURE_RETRY_REQUIRED      = SCI_FAILURE_RETRY_REQUIRED,
+	SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+	SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: Check that the following statuses are properly handled.
+ */
+enum sci_task_status {
+	SCI_TASK_SUCCESS                         = SCI_SUCCESS,
+	SCI_TASK_FAILURE                         = SCI_FAILURE,
+	SCI_TASK_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+	SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+	SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+	SCI_TASK_FAILURE_INVALID_TAG             = SCI_FAILURE_INVALID_IO_TAG,
+	SCI_TASK_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+	SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+	SCI_TASK_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+	SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+	SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+	SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receives the 4-byte endian-swapped version of src
+ * @src: word-aligned source buffer
+ * @word_cnt: number of 32-bit words to copy
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order.  Regardless of host endianness
+ * this is always a swab32()-per-dword conversion of the standard
+ * definition, i.e. single-byte fields swapped and multi-byte fields
+ * in little-endian order.
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+	u32 *dest = _dest, *src = _src;
+
+	while (--word_cnt >= 0)
+		dest[word_cnt] = swab32(src[word_cnt]);
+}
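+/* Illustrative example: sci_swab32_cpy(dst, src, 2) with src words
+ * { 0x00010203, 0x04050607 } stores { 0x03020100, 0x07060504 }, i.e. a
+ * swab32() per dword.
+ */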
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with scic_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+	struct timer_list	timer;
+	bool			cancel;
+};
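+/*
+ * The canonical callback pattern (see e.g. phy_sata_timeout() in phy.c) is:
+ * take ihost->scic_lock, return early if tmr->cancel is set, otherwise
+ * handle the timeout, then drop the lock.
+ */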
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+	tmr->cancel = 0;
+	setup_timer(&tmr->timer, fn, (unsigned long)tmr);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+	tmr->cancel = 0;
+	mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+	tmr->cancel = 1;
+	del_timer(&tmr->timer);
+}
+
+struct sci_base_state_machine {
+	const struct sci_base_state *state_table;
+	u32 initial_state_id;
+	u32 current_state_id;
+	u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+	sci_state_transition_t enter_state;	/* Called on state entry */
+	sci_state_transition_t exit_state;	/* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+			const struct sci_base_state *state_table,
+			u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
+#endif  /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 0000000..cb87b2e
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1487 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+#undef C
+#define C(a) (#a)
+static const char *phy_state_name(enum sci_phy_states state)
+{
+	static const char * const strings[] = PHY_STATES;
+
+	return strings[state];
+}
+#undef C
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+	return iphy->max_negotiated_speed;
+}
+
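+/* Recover the owning host from a phy: stepping back phy_index entries
+ * yields &ihost->phys[0], from which container_of() gives the host.
+ */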
+static struct isci_host *phy_to_host(struct isci_phy *iphy)
+{
+	struct isci_phy *table = iphy - iphy->phy_index;
+	struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
+
+	return ihost;
+}
+
+static struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+	return &phy_to_host(iphy)->pdev->dev;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+				       struct scu_transport_layer_registers __iomem *reg)
+{
+	u32 tl_control;
+
+	iphy->transport_layer_registers = reg;
+
+	writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+		&iphy->transport_layer_registers->stp_rni);
+
+	/*
+	 * Hardware team recommends that we enable the STP prefetch for all
+	 * transports
+	 */
+	tl_control = readl(&iphy->transport_layer_registers->control);
+	tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+	writel(tl_control, &iphy->transport_layer_registers->control);
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+				  struct scu_link_layer_registers __iomem *llr)
+{
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+	struct sci_phy_user_params *phy_user;
+	struct sci_phy_oem_params *phy_oem;
+	int phy_idx = iphy->phy_index;
+	struct sci_phy_cap phy_cap;
+	u32 phy_configuration;
+	u32 parity_check = 0;
+	u32 parity_count = 0;
+	u32 llctl, link_rate;
+	u32 clksm_value = 0;
+	u32 sp_timeouts = 0;
+
+	phy_user = &ihost->user_parameters.phys[phy_idx];
+	phy_oem = &ihost->oem_parameters.phys[phy_idx];
+	iphy->link_layer_registers = llr;
+
+	/* Set our IDENTIFY frame data */
+	#define SCI_END_DEVICE 0x01
+
+	writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+	       SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+	       SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+	       SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+	       SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+	       &llr->transmit_identification);
+
+	/* Write the device SAS Address */
+	writel(0xFEDCBA98, &llr->sas_device_name_high);
+	writel(phy_idx, &llr->sas_device_name_low);
+
+	/* Write the source SAS Address */
+	writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+	writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
+
+	/* Clear and Set the PHY Identifier */
+	writel(0, &llr->identify_frame_phy_id);
+	writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
+
+	/* Change the initial state of the phy configuration register */
+	phy_configuration = readl(&llr->phy_configuration);
+
+	/* Hold OOB state machine in reset */
+	phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+	writel(phy_configuration, &llr->phy_configuration);
+
+	/* Configure the SNW capabilities */
+	phy_cap.all = 0;
+	phy_cap.start = 1;
+	phy_cap.gen3_no_ssc = 1;
+	phy_cap.gen2_no_ssc = 1;
+	phy_cap.gen1_no_ssc = 1;
+	if (ihost->oem_parameters.controller.do_enable_ssc) {
+		struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+		struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx];
+		struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+		bool en_sas = false;
+		bool en_sata = false;
+		u32 sas_type = 0;
+		u32 sata_spread = 0x2;
+		u32 sas_spread = 0x2;
+
+		phy_cap.gen3_ssc = 1;
+		phy_cap.gen2_ssc = 1;
+		phy_cap.gen1_ssc = 1;
+
+		if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+			en_sas = en_sata = true;
+		else {
+			sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+			sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+			if (sata_spread)
+				en_sata = true;
+
+			if (sas_spread) {
+				en_sas = true;
+				sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+			}
+
+		}
+
+		if (en_sas) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_xcvr_control0);
+			reg |= (0x00100000 | (sas_type << 19));
+			writel(reg, &xcvr->afe_xcvr_control0);
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sas_spread << 8;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+		}
+
+		if (en_sata) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sata_spread;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+
+			reg = readl(&llr->stp_control);
+			reg |= 1 << 12;
+			writel(reg, &llr->stp_control);
+		}
+	}
+
+	/* The SAS specification indicates that the phy_capabilities that
+	 * are transmitted shall have an even parity.  Calculate the parity.
+	 */
+	parity_check = phy_cap.all;
+	while (parity_check != 0) {
+		if (parity_check & 0x1)
+			parity_count++;
+		parity_check >>= 1;
+	}
+
+	/* If parity indicates there are an odd number of bits set, then
+	 * set the parity bit to 1 in the phy capabilities.
+	 */
+	if ((parity_count % 2) != 0)
+		phy_cap.parity = 1;
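+	/* (Equivalently: phy_cap.parity = hweight32(phy_cap.all) & 1; the
+	 * open-coded loop above is kept for clarity.)
+	 */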
+
+	writel(phy_cap.all, &llr->phy_capabilities);
+
+	/* Set the enable spinup period but disable the ability to send
+	 * notify enable spinup
+	 */
+	writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+			phy_user->notify_enable_spin_up_insertion_frequency),
+		&llr->notify_enable_spinup_control);
+
+	/* Write the ALIGN insertion frequency for the connected phy and
+	 * independent of the connection state.
+	 */
+	clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+			phy_user->in_connection_align_insertion_frequency);
+
+	clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+			phy_user->align_insertion_frequency);
+
+	writel(clksm_value, &llr->clock_skew_management);
+
+	if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+		writel(0x04210400, &llr->afe_lookup_table_control);
+		writel(0x020A7C05, &llr->sas_primitive_timeout);
+	} else {
+		writel(0x02108421, &llr->afe_lookup_table_control);
+	}
+
+	llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+		(u8)ihost->user_parameters.no_outbound_task_timeout);
+
+	switch (phy_user->max_speed_generation) {
+	case SCIC_SDS_PARM_GEN3_SPEED:
+		link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+		break;
+	case SCIC_SDS_PARM_GEN2_SPEED:
+		link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+		break;
+	default:
+		link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+		break;
+	}
+	llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+	writel(llctl, &llr->link_layer_control);
+
+	sp_timeouts = readl(&llr->sas_phy_timeouts);
+
+	/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+	sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+	/* Set RATE_CHANGE timeout value to 0x3B (59us).  This ensures SCU can
+	 * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+	 */
+	sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+	writel(sp_timeouts, &llr->sas_phy_timeouts);
+
+	if (is_a2(ihost->pdev)) {
+		/* Program the max ARB time for the PHY to 700us so we
+		 * inter-operate with the PMC expander which shuts down
+		 * PHYs if the expander PHY generates too many breaks.
+		 * This time value will guarantee that the initiator PHY
+		 * will generate the break.
+		 */
+		writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+		       &llr->maximum_arbitration_wait_timer_timeout);
+	}
+
+	/* Disable link layer hang detection, rely on the OS timeout for
+	 * I/O timeouts.
+	 */
+	writel(0, &llr->link_layer_hang_detection_timeout);
+
+	/* We can exit the initial state to the stopped state */
+	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+	return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	dev_dbg(sciphy_to_dev(iphy),
+		 "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+		 "timeout.\n",
+		 __func__,
+		 iphy);
+
+	sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * phy_get_non_dummy_port() - This method returns the port currently
+ *    containing this phy.  If the phy is currently contained by the dummy
+ *    port, then the phy is considered to not be part of a port.
+ * @iphy: This parameter specifies the phy for which to retrieve the
+ *    containing port.
+ *
+ * This method returns a handle to the port that contains the supplied phy.
+ * NULL is returned if the phy is not part of a real port (i.e. it is
+ * contained in the dummy port); any other value is a pointer to the port
+ * containing the phy.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+	struct isci_port *iport = iphy->owning_port;
+
+	if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+		return NULL;
+
+	return iphy->owning_port;
+}
+
+/**
+ * sci_phy_set_port() - This method will assign a port to the phy object.
+ * @iphy: This parameter specifies the phy for which to assign a port.
+ * @iport: This parameter specifies the port to assign to the phy.
+ */
+void sci_phy_set_port(
+	struct isci_phy *iphy,
+	struct isci_port *iport)
+{
+	iphy->owning_port = iport;
+
+	if (iphy->bcn_received_while_port_unassigned) {
+		iphy->bcn_received_while_port_unassigned = false;
+		sci_port_broadcast_change_received(iphy->owning_port, iphy);
+	}
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+				   struct scu_transport_layer_registers __iomem *tl,
+				   struct scu_link_layer_registers __iomem *ll)
+{
+	/* Perform the initialization of the TL hardware */
+	sci_phy_transport_layer_initialization(iphy, tl);
+
+	/* Perform the initialization of the PE hardware */
+	sci_phy_link_layer_initialization(iphy, ll);
+
+	/* There is nothing that needs to be done in this state just
+	 * transition to the stopped state
+	 */
+	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_phy_setup_transport() - This method assigns the direct attached
+ *    device ID for this phy.
+ * @iphy: The phy for which the direct attached device ID is to be assigned.
+ * @device_id: The direct attached device ID to assign to the phy.  This will
+ *    either be the RNi for the device or an invalid RNi if there is no
+ *    current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+	u32 tl_control;
+
+	writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+	/*
+	 * The read should guarantee that the first write gets posted
+	 * before the next write
+	 */
+	tl_control = readl(&iphy->transport_layer_registers->control);
+	tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+	writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+	u32 scu_sas_pcfg_value;
+
+	scu_sas_pcfg_value =
+		readl(&iphy->link_layer_registers->phy_configuration);
+	scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+	writel(scu_sas_pcfg_value,
+		&iphy->link_layer_registers->phy_configuration);
+
+	sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+	u32 scu_sas_pcfg_value;
+
+	scu_sas_pcfg_value =
+		readl(&iphy->link_layer_registers->phy_configuration);
+	scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+	writel(scu_sas_pcfg_value,
+		&iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+	sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+	sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+	struct sas_identify_frame *iaf;
+
+	iaf = &iphy->frame_rcvd.iaf;
+	memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+	proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+
+	if (state != SCI_PHY_STOPPED) {
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+
+	switch (state) {
+	case SCI_PHY_SUB_INITIAL:
+	case SCI_PHY_SUB_AWAIT_OSSP_EN:
+	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_SAS_POWER:
+	case SCI_PHY_SUB_AWAIT_SATA_POWER:
+	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+	case SCI_PHY_SUB_FINAL:
+	case SCI_PHY_READY:
+		break;
+	default:
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+
+	if (state != SCI_PHY_READY) {
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+
+	switch (state) {
+	case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+		u32 enable_spinup;
+
+		enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+		enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+		writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+		/* Change state to the final state this substate machine has run to completion */
+		sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+		return SCI_SUCCESS;
+	}
+	case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+		u32 scu_sas_pcfg_value;
+
+		/* Release the spinup hold state and reset the OOB state machine */
+		scu_sas_pcfg_value =
+			readl(&iphy->link_layer_registers->phy_configuration);
+		scu_sas_pcfg_value &=
+			~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+		scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+		writel(scu_sas_pcfg_value,
+			&iphy->link_layer_registers->phy_configuration);
+
+		/* Now restart the OOB operation */
+		scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+		scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+		writel(scu_sas_pcfg_value,
+			&iphy->link_layer_registers->phy_configuration);
+
+		/* Change state to the final state this substate machine has run to completion */
+		sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+		return SCI_SUCCESS;
+	}
+	default:
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+	/* continue the link training for the phy as if it were a SAS PHY
+	 * instead of a SATA PHY. This is done because the completion queue had a SAS
+	 * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+	 */
+	u32 phy_control;
+
+	phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+	phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+	writel(phy_control,
+	       &iphy->link_layer_registers->phy_configuration);
+
+	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+	iphy->protocol = SAS_PROTOCOL_SSP;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+	/* This method continues the link training for the phy as if it were a SATA PHY
+	 * instead of a SAS PHY.  This is done because the completion queue had a SATA
+	 * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+	 */
+	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+	iphy->protocol = SAS_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ *    all protocols upon completion of link training.
+ * @iphy: This parameter specifies the phy object for which link training
+ *    has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ *    associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ *    sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+					   enum sas_linkrate max_link_rate,
+					   u32 next_state)
+{
+	iphy->max_negotiated_speed = max_link_rate;
+
+	sci_change_state(&iphy->sm, next_state);
+}
+
+static const char *phy_event_name(u32 event_code)
+{
+	switch (scu_get_event_code(event_code)) {
+	case SCU_EVENT_PORT_SELECTOR_DETECTED:
+		return "port selector";
+	case SCU_EVENT_SENT_PORT_SELECTION:
+		return "port selection";
+	case SCU_EVENT_HARD_RESET_TRANSMITTED:
+		return "tx hard reset";
+	case SCU_EVENT_HARD_RESET_RECEIVED:
+		return "rx hard reset";
+	case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+		return "identify timeout";
+	case SCU_EVENT_LINK_FAILURE:
+		return "link fail";
+	case SCU_EVENT_SATA_SPINUP_HOLD:
+		return "sata spinup hold";
+	case SCU_EVENT_SAS_15_SSC:
+	case SCU_EVENT_SAS_15:
+		return "sas 1.5";
+	case SCU_EVENT_SAS_30_SSC:
+	case SCU_EVENT_SAS_30:
+		return "sas 3.0";
+	case SCU_EVENT_SAS_60_SSC:
+	case SCU_EVENT_SAS_60:
+		return "sas 6.0";
+	case SCU_EVENT_SATA_15_SSC:
+	case SCU_EVENT_SATA_15:
+		return "sata 1.5";
+	case SCU_EVENT_SATA_30_SSC:
+	case SCU_EVENT_SATA_30:
+		return "sata 3.0";
+	case SCU_EVENT_SATA_60_SSC:
+	case SCU_EVENT_SATA_60:
+		return "sata 6.0";
+	case SCU_EVENT_SAS_PHY_DETECTED:
+		return "sas detect";
+	case SCU_EVENT_SATA_PHY_DETECTED:
+		return "sata detect";
+	default:
+		return "unknown";
+	}
+}
+
+#define phy_event_dbg(iphy, state, code) \
+	dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
+#define phy_event_warn(iphy, state, code) \
+	dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
+
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+	u32 val;
+
+	/* Extend timeout */
+	val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+	val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+	val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+	writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+
+	switch (state) {
+	case SCI_PHY_SUB_AWAIT_OSSP_EN:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			sci_phy_start_sas_link_training(iphy);
+			iphy->is_in_link_training = true;
+			break;
+		case SCU_EVENT_SATA_SPINUP_HOLD:
+			sci_phy_start_sata_link_training(iphy);
+			iphy->is_in_link_training = true;
+			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		default:
+			phy_event_dbg(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			/*
+			 * Why is this being reported again by the controller?
+			 * We would re-enter this state so just stay here */
+			break;
+		case SCU_EVENT_SAS_15:
+		case SCU_EVENT_SAS_15_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+						       SCI_PHY_SUB_AWAIT_IAF_UF);
+			break;
+		case SCU_EVENT_SAS_30:
+		case SCU_EVENT_SAS_30_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+						       SCI_PHY_SUB_AWAIT_IAF_UF);
+			break;
+		case SCU_EVENT_SAS_60:
+		case SCU_EVENT_SAS_60_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+						       SCI_PHY_SUB_AWAIT_IAF_UF);
+			break;
+		case SCU_EVENT_SATA_SPINUP_HOLD:
+			/*
+			 * We were doing SAS PHY link training and received a SATA PHY event;
+			 * continue OOB/SN as if this were a SATA PHY */
+			sci_phy_start_sata_link_training(iphy);
+			break;
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_IAF_UF:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			/* Backup the state machine */
+			sci_phy_start_sas_link_training(iphy);
+			break;
+		case SCU_EVENT_SATA_SPINUP_HOLD:
+			/* We were doing SAS PHY link training and received a
+			 * SATA PHY event; continue OOB/SN as if this were a
+			 * SATA PHY
+			 */
+			sci_phy_start_sata_link_training(iphy);
+			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_LINK_FAILURE:
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+			/* fall through */
+		case SCU_EVENT_HARD_RESET_RECEIVED:
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SAS_POWER:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SATA_POWER:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_SATA_SPINUP_HOLD:
+			/* These events are received every 10ms and are
+			 * expected while in this state
+			 */
+			break;
+
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			/* There has been a change in the phy type before OOB/SN for the
+			 * SATA finished; start down the SAS link training path.
+			 */
+			sci_phy_start_sas_link_training(iphy);
+			break;
+
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_SATA_SPINUP_HOLD:
+			/* These events might be received since we don't know how many may be in
+			 * the completion queue while waiting for power
+			 */
+			break;
+		case SCU_EVENT_SATA_PHY_DETECTED:
+			iphy->protocol = SAS_PROTOCOL_SATA;
+
+			/* We have received the SATA PHY notification change state */
+			sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+			break;
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			/* There has been a change in the phy type before OOB/SN for the
+			 * SATA finished; start down the SAS link training path.
+			 */
+			sci_phy_start_sas_link_training(iphy);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_SATA_PHY_DETECTED:
+			/*
+			 * The hardware reports multiple SATA PHY detected events;
+			 * ignore the extras */
+			break;
+		case SCU_EVENT_SATA_15:
+		case SCU_EVENT_SATA_15_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+						       SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+			break;
+		case SCU_EVENT_SATA_30:
+		case SCU_EVENT_SATA_30_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+						       SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+			break;
+		case SCU_EVENT_SATA_60:
+		case SCU_EVENT_SATA_60_SSC:
+			sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+						       SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+			break;
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_SAS_PHY_DETECTED:
+			/*
+			 * There has been a change in the phy type before OOB/SN for the
+			 * SATA finished; start down the SAS link training path. */
+			sci_phy_start_sas_link_training(iphy);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+
+		return SCI_SUCCESS;
+	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_SATA_PHY_DETECTED:
+			/* Backup the state machine */
+			sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+			break;
+
+		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_READY:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_LINK_FAILURE:
+			/* Set default timeout */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		case SCU_EVENT_BROADCAST_CHANGE:
+		case SCU_EVENT_BROADCAST_SES:
+		case SCU_EVENT_BROADCAST_RESERVED0:
+		case SCU_EVENT_BROADCAST_RESERVED1:
+		case SCU_EVENT_BROADCAST_EXPANDER:
+		case SCU_EVENT_BROADCAST_AEN:
+			/* Broadcast change received. Notify the port. */
+			if (phy_get_non_dummy_port(iphy) != NULL)
+				sci_port_broadcast_change_received(iphy->owning_port, iphy);
+			else
+				iphy->bcn_received_while_port_unassigned = true;
+			break;
+		case SCU_EVENT_BROADCAST_RESERVED3:
+		case SCU_EVENT_BROADCAST_RESERVED4:
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE_INVALID_STATE;
+		}
+		return SCI_SUCCESS;
+	case SCI_PHY_RESETTING:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_HARD_RESET_TRANSMITTED:
+			/* Link failure change state back to the starting state */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
+		default:
+			phy_event_warn(iphy, state, event_code);
+			return SCI_FAILURE_INVALID_STATE;
+		}
+		return SCI_SUCCESS;
+	default:
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+	enum sci_phy_states state = iphy->sm.current_state_id;
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+	enum sci_status result;
+	unsigned long flags;
+
+	switch (state) {
+	case SCI_PHY_SUB_AWAIT_IAF_UF: {
+		u32 *frame_words;
+		struct sas_identify_frame iaf;
+
+		result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								  frame_index,
+								  (void **)&frame_words);
+
+		if (result != SCI_SUCCESS)
+			return result;
+
+		sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+		if (iaf.frame_type == 0) {
+			u32 state;
+
+			spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+			memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+			spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+			if (iaf.smp_tport) {
+				/* We got the IAF for an expander PHY; go to the final
+				 * state since there are no power requirements for
+				 * expander phys.
+				 */
+				state = SCI_PHY_SUB_FINAL;
+			} else {
+				/* We got the IAF; we can now go to the await spinup
+				 * semaphore state
+				 */
+				state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+			}
+			sci_change_state(&iphy->sm, state);
+			result = SCI_SUCCESS;
+		} else
+			dev_warn(sciphy_to_dev(iphy),
+				"%s: PHY starting substate machine received "
+				"unexpected frame id %x\n",
+				__func__, frame_index);
+
+		sci_controller_release_frame(ihost, frame_index);
+		return result;
+	}
+	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+		struct dev_to_host_fis *frame_header;
+		u32 *fis_frame_data;
+
+		result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								  frame_index,
+								  (void **)&frame_header);
+
+		if (result != SCI_SUCCESS)
+			return result;
+
+		if ((frame_header->fis_type == FIS_REGD2H) &&
+		    !(frame_header->status & ATA_BUSY)) {
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								 frame_index,
+								 (void **)&fis_frame_data);
+
+			spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+			sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+							  frame_header,
+							  fis_frame_data);
+			spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+			/* got the signature FIS; we can now go to the final state */
+			sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+			result = SCI_SUCCESS;
+		} else
+			dev_warn(sciphy_to_dev(iphy),
+				 "%s: PHY starting substate machine received "
+				 "unexpected frame id %x\n",
+				 __func__, frame_index);
+
+		/* Regardless of the result we are done with this frame */
+		sci_controller_release_frame(ihost, frame_index);
+
+		return result;
+	}
+	default:
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	/* This is just a temporary state; go off to the AWAIT_OSSP_EN substate */
+	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+	sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+	sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+	sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+	sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+		/*
+		 * Clear the PE suspend condition so we can actually
+		 * receive SIG FIS.
+		 * The hardware will not respond to the XRDY until the PE
+		 * suspend condition is cleared.
+		 */
+		sci_phy_resume(iphy);
+
+		sci_mod_timer(&iphy->sata_timer,
+			      SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+	} else
+		iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	/* State machine has run to completion so exit out and change
+	 * the base state machine to the ready state
+	 */
+	sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine - stop a phy's protocol engine
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object. This does not reset the
+ * protocol engine; it just suspends it and places it in a state where it will
+ * not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+	struct isci_phy *iphy)
+{
+	u32 scu_sas_pcfg_value;
+	u32 enable_spinup_value;
+
+	/* Suspend the protocol engine and place it in a sata spinup hold state */
+	scu_sas_pcfg_value =
+		readl(&iphy->link_layer_registers->phy_configuration);
+	scu_sas_pcfg_value |=
+		(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+		 SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+		 SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+	writel(scu_sas_pcfg_value,
+	       &iphy->link_layer_registers->phy_configuration);
+
+	/* Disable the notify enable spinup primitives */
+	enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+	enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+	writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
+{
+	struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+	u32 val;
+
+	/* Reset OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+		 SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) |
+		 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/* Reset OOB sequence - end */
+
+	/* Start OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/* Start OOB sequence - end */
+}
+
+/**
+ * scu_link_layer_tx_hard_reset - transmit a hard reset on the specified phy
+ * @iphy: This is the struct isci_phy object on which to transmit a hard reset.
+ *
+ * This method will transmit a hard reset request on the specified phy. The SCU
+ * hardware requires that we reset the OOB state machine and set the hard reset
+ * bit in the phy configuration register. We then must start OOB over with the
+ * hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+	struct isci_phy *iphy)
+{
+	u32 phy_configuration_value;
+
+	/*
+	 * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+	 * to the starting state. */
+	phy_configuration_value =
+		readl(&iphy->link_layer_registers->phy_configuration);
+	phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+	phy_configuration_value |=
+		(SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+		 SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+	writel(phy_configuration_value,
+	       &iphy->link_layer_registers->phy_configuration);
+
+	/* Now take the OOB state machine out of reset */
+	phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+	phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+	writel(phy_configuration_value,
+	       &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_port *iport = iphy->owning_port;
+	struct isci_host *ihost = iport->owning_controller;
+
+	/*
+	 * @todo We need to get to the controller to place this PE in a
+	 * reset state
+	 */
+	sci_del_timer(&iphy->sata_timer);
+
+	scu_link_layer_stop_protocol_engine(iphy);
+
+	if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+		sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_port *iport = iphy->owning_port;
+	struct isci_host *ihost = iport->owning_controller;
+
+	scu_link_layer_stop_protocol_engine(iphy);
+	scu_link_layer_start_oob(iphy);
+
+	/* We don't know what kind of phy we are going to be just yet */
+	iphy->protocol = SAS_PROTOCOL_NONE;
+	iphy->bcn_received_while_port_unassigned = false;
+
+	if (iphy->sm.previous_state_id == SCI_PHY_READY)
+		sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+	sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+	struct isci_port *iport = iphy->owning_port;
+	struct isci_host *ihost = iport->owning_controller;
+
+	sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+	/* The phy is being reset, therefore deactivate it from the port.  In
+	 * the resetting state we don't notify the user regarding link up and
+	 * link down notifications
+	 */
+	sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+	if (iphy->protocol == SAS_PROTOCOL_SSP) {
+		scu_link_layer_tx_hard_reset(iphy);
+	} else {
+		/* The SCU does not need to have a discrete reset state so
+		 * just go back to the starting state.
+		 */
+		sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+	}
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+	[SCI_PHY_INITIAL] = { },
+	[SCI_PHY_STOPPED] = {
+		.enter_state = sci_phy_stopped_state_enter,
+	},
+	[SCI_PHY_STARTING] = {
+		.enter_state = sci_phy_starting_state_enter,
+	},
+	[SCI_PHY_SUB_INITIAL] = {
+		.enter_state = sci_phy_starting_initial_substate_enter,
+	},
+	[SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+	[SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+	[SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+	[SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+		.enter_state = sci_phy_starting_await_sas_power_substate_enter,
+		.exit_state  = sci_phy_starting_await_sas_power_substate_exit,
+	},
+	[SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+		.enter_state = sci_phy_starting_await_sata_power_substate_enter,
+		.exit_state  = sci_phy_starting_await_sata_power_substate_exit
+	},
+	[SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+		.enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+		.exit_state  = sci_phy_starting_await_sata_phy_substate_exit
+	},
+	[SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+		.enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+		.exit_state  = sci_phy_starting_await_sata_speed_substate_exit
+	},
+	[SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+		.enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+		.exit_state  = sci_phy_starting_await_sig_fis_uf_substate_exit
+	},
+	[SCI_PHY_SUB_FINAL] = {
+		.enter_state = sci_phy_starting_final_substate_enter,
+	},
+	[SCI_PHY_READY] = {
+		.enter_state = sci_phy_ready_state_enter,
+		.exit_state = sci_phy_ready_state_exit,
+	},
+	[SCI_PHY_RESETTING] = {
+		.enter_state = sci_phy_resetting_state_enter,
+	},
+	[SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+			    struct isci_port *iport, u8 phy_index)
+{
+	sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+	/* Copy the rest of the input data to our locals */
+	iphy->owning_port = iport;
+	iphy->phy_index = phy_index;
+	iphy->bcn_received_while_port_unassigned = false;
+	iphy->protocol = SAS_PROTOCOL_NONE;
+	iphy->link_layer_registers = NULL;
+	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+	/* Create the SIGNATURE FIS Timeout timer for this phy */
+	sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+	struct sci_oem_params *oem = &ihost->oem_parameters;
+	u64 sci_sas_addr;
+	__be64 sas_addr;
+
+	sci_sas_addr = oem->phys[index].sas_address.high;
+	sci_sas_addr <<= 32;
+	sci_sas_addr |= oem->phys[index].sas_address.low;
+	sas_addr = cpu_to_be64(sci_sas_addr);
+	memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
+
+	iphy->sas_phy.enabled = 0;
+	iphy->sas_phy.id = index;
+	iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+	iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+	iphy->sas_phy.ha = &ihost->sas_ha;
+	iphy->sas_phy.lldd_phy = iphy;
+	iphy->sas_phy.enabled = 1;
+	iphy->sas_phy.class = SAS;
+	iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+	iphy->sas_phy.tproto = 0;
+	iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+	iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+	iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+	iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+	memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
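+
+/*
+ * Illustrative note on the address packing in isci_phy_init() (the values
+ * here are hypothetical): the OEM table holds the SAS address as two u32
+ * halves, while libsas expects a big-endian byte array, so:
+ *
+ *	high = 0x5000c50a, low = 0x1b2c3d4e
+ *	sci_sas_addr   = 0x5000c50a1b2c3d4e
+ *	iphy->sas_addr = { 0x50, 0x00, 0xc5, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e }
+ */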
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ *    functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the sas phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * Return: status, zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+		     enum phy_func func,
+		     void *buf)
+{
+	int ret = 0;
+	struct isci_phy *iphy = sas_phy->lldd_phy;
+	struct asd_sas_port *port = sas_phy->port;
+	struct isci_host *ihost = sas_phy->ha->lldd_ha;
+	unsigned long flags;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+		__func__, sas_phy, func, buf, iphy, port);
+
+	switch (func) {
+	case PHY_FUNC_DISABLE:
+		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
+		sci_phy_stop(iphy);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		break;
+
+	case PHY_FUNC_LINK_RESET:
+		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
+		sci_phy_stop(iphy);
+		sci_phy_start(iphy);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		break;
+
+	case PHY_FUNC_HARD_RESET:
+		if (!port)
+			return -ENODEV;
+
+		ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
+
+		break;
+	case PHY_FUNC_GET_EVENTS: {
+		struct scu_link_layer_registers __iomem *r;
+		struct sas_phy *phy = sas_phy->phy;
+
+		r = iphy->link_layer_registers;
+		phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+		phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+		phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+		phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+		break;
+	}
+
+	default:
+		dev_dbg(&ihost->pdev->dev,
+			   "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+			   __func__, sas_phy, func);
+		ret = -ENOSYS;
+		break;
+	}
+	return ret;
+}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 0000000..45fecfa
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,460 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine.  Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset.  Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT    25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals.  We need to
+ * make sure after a hotplug timeout if we have not received the speed event
+ * notification from the hardware that we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT  250
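+
+/*
+ * Both timeouts are driven through the generic sci_timer helpers; the
+ * starting substates arm the timer on entry and cancel it on exit.  A
+ * minimal sketch (mirroring the substate handlers in phy.c):
+ *
+ *	sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+ *	...
+ *	sci_del_timer(&iphy->sata_timer);
+ */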
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: phy state machine
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout for SATA signature FIS arrival
+ */
+struct isci_phy {
+	struct sci_base_state_machine sm;
+	struct isci_port *owning_port;
+	enum sas_linkrate max_negotiated_speed;
+	enum sas_protocol protocol;
+	u8 phy_index;
+	bool bcn_received_while_port_unassigned;
+	bool is_in_link_training;
+	struct sci_timer sata_timer;
+	struct scu_transport_layer_registers __iomem *transport_layer_registers;
+	struct scu_link_layer_registers __iomem *link_layer_registers;
+	struct asd_sas_phy sas_phy;
+	u8 sas_addr[SAS_ADDR_SIZE];
+	union {
+		struct sas_identify_frame iaf;
+		struct dev_to_host_fis fis;
+	} frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+	struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+	return iphy;
+}
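+
+/*
+ * Illustrative use (a sketch, not a complete handler): libsas callbacks
+ * hand back an asd_sas_phy, and the driver recovers its container:
+ *
+ *	struct isci_phy *iphy = to_iphy(sas_phy);
+ */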
+
+struct sci_phy_cap {
+	union {
+		struct {
+			/*
+			 * The SAS specification indicates the start bit shall
+			 * always be set to 1.  This implementation will have
+			 * the start bit set to 0 if the PHY CAPABILITIES
+			 * were either not received or speed negotiation
+			 * failed.
+			 */
+			u8 start:1;
+			u8 tx_ssc_type:1;
+			u8 res1:2;
+			u8 req_logical_linkrate:4;
+
+			u32 gen1_no_ssc:1;
+			u32 gen1_ssc:1;
+			u32 gen2_no_ssc:1;
+			u32 gen2_ssc:1;
+			u32 gen3_no_ssc:1;
+			u32 gen3_ssc:1;
+			u32 res2:17;
+			u32 parity:1;
+		};
+		u32 all;
+	};
+} __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+	union {
+		struct {
+			u16 _r_a:1;
+			u16 smp_iport:1;
+			u16 stp_iport:1;
+			u16 ssp_iport:1;
+			u16 _r_b:4;
+			u16 _r_c:1;
+			u16 smp_tport:1;
+			u16 stp_tport:1;
+			u16 ssp_tport:1;
+			u16 _r_d:4;
+		};
+		u16 all;
+	};
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ *    all phys that can be retrieved.
+ */
+struct sci_phy_properties {
+	/**
+	 * This field specifies the port that currently contains the
+	 * supplied phy.  This field may be set to NULL
+	 * if the phy is not currently contained in a port.
+	 */
+	struct isci_port *iport;
+
+	/**
+	 * This field specifies the link rate at which the phy is
+	 * currently operating.
+	 */
+	enum sas_linkrate negotiated_link_rate;
+
+	/**
+	 * This field specifies the index of the phy in relation to other
+	 * phys within the controller.  This index is zero relative.
+	 */
+	u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ *    specific to a SAS phy, that can be retrieved.
+ */
+struct sci_sas_phy_properties {
+	/**
+	 * This field delineates the Identify Address Frame received
+	 * from the remote end point.
+	 */
+	struct sas_identify_frame rcvd_iaf;
+
+	/**
+	 * This field delineates the Phy capabilities structure received
+	 * from the remote end point.
+	 */
+	struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ *    specific to a SATA phy, that can be retrieved.
+ */
+struct sci_sata_phy_properties {
+	/**
+	 * This field delineates the signature FIS received from the
+	 * attached target.
+	 */
+	struct dev_to_host_fis signature_fis;
+
+	/**
+	 * This field specifies to the user if a port selector is connected
+	 * on the specified phy.
+	 */
+	bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ *    optional information that can be retrieved for a specific phy.
+ */
+enum sci_phy_counter_id {
+	/**
+	 * This PHY information field tracks the number of frames received.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+	/**
+	 * This PHY information field tracks the number of frames transmitted.
+	 */
+	SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+	/**
+	 * This PHY information field tracks the number of DWORDs received.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+	/**
+	 * This PHY information field tracks the number of DWORDs transmitted.
+	 */
+	SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+	/**
+	 * This PHY information field tracks the number of times DWORD
+	 * synchronization was lost.
+	 */
+	SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+	/**
+	 * This PHY information field tracks the number of received DWORDs with
+	 * running disparity errors.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+	/**
+	 * This PHY information field tracks the number of received frames with a
+	 * CRC error (not including short or truncated frames).
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+	/**
+	 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+	 * primitives received.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+	/**
+	 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+	 * primitives transmitted.
+	 */
+	SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+	/**
+	 * This PHY information field tracks the number of times the inactivity
+	 * timer for connections on the phy has been utilized.
+	 */
+	SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+	/**
+	 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+	 * primitives received.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+	/**
+	 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+	 * primitives transmitted.
+	 */
+	SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+	/**
+	 * This PHY information field tracks the number of CREDIT BLOCKED
+	 * primitives received.
+	 * @note Depending on remote device implementation, credit blocks
+	 *       may occur regularly.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+	/**
+	 * This PHY information field contains the number of short frames
+	 * received.  A short frame is simply a frame smaller than what is
+	 * allowed by either the SAS or SATA specification.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+	/**
+	 * This PHY information field contains the number of frames received after
+	 * credit has been exhausted.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+	/**
+	 * This PHY information field contains the number of frames received after
+	 * a DONE has been received.
+	 */
+	SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+	/**
+	 * This PHY information field contains the number of times the phy
+	 * failed to achieve DWORD synchronization during speed negotiation.
+	 */
+	SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+/**
+ * enum sci_phy_states - phy state machine states
+ * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
+ *		     machine.
+ * @SCI_PHY_STOPPED: phy has successfully been stopped.  In this state
+ *		     no new IO operations are permitted on this phy.
+ * @SCI_PHY_STARTING: the phy is in the process of becoming ready.  In
+ *		      this state no new IO operations are permitted on
+ *		      this phy.
+ * @SCI_PHY_SUB_INITIAL: Initial state
+ * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
+ *			       type notification
+ * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
+ *				    notification
+ * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
+ *			      notification
+ * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
+ *				 power
+ * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
+ *				  power
+ * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
+ *				   notification
+ * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
+ *				     notification
+ * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
+ *				  unsolicited frame notification
+ * @SCI_PHY_SUB_FINAL: Exit state for this state machine
+ * @SCI_PHY_READY: phy is now ready.  Thus, the user is able to perform
+ *		   IO operations utilizing this phy as long as it is
+ *		   currently part of a valid port.  This state is
+ *		   entered from the STARTING state.
+ * @SCI_PHY_RESETTING: phy is in the process of being reset.  In this
+ *		       state no new IO operations are permitted on this
+ *		       phy.  This state is entered from the READY state.
+ * @SCI_PHY_FINAL: Simply the final state for the base phy state
+ *		   machine.
+ */
+#define PHY_STATES {\
+	C(PHY_INITIAL),\
+	C(PHY_STOPPED),\
+	C(PHY_STARTING),\
+	C(PHY_SUB_INITIAL),\
+	C(PHY_SUB_AWAIT_OSSP_EN),\
+	C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
+	C(PHY_SUB_AWAIT_IAF_UF),\
+	C(PHY_SUB_AWAIT_SAS_POWER),\
+	C(PHY_SUB_AWAIT_SATA_POWER),\
+	C(PHY_SUB_AWAIT_SATA_PHY_EN),\
+	C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
+	C(PHY_SUB_AWAIT_SIG_FIS_UF),\
+	C(PHY_SUB_FINAL),\
+	C(PHY_READY),\
+	C(PHY_RESETTING),\
+	C(PHY_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_phy_states PHY_STATES;
+#undef C
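+
+/*
+ * The PHY_STATES list is expanded twice through the C() macro: once above
+ * to declare enum sci_phy_states, and once with C(a) defined as #a to
+ * build the string table behind phy_state_name() (a sketch mirroring
+ * port_state_name() in port.c):
+ *
+ *	#undef C
+ *	#define C(a) (#a)
+ *	static const char * const strings[] = PHY_STATES;
+ */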
+
+void sci_phy_construct(
+	struct isci_phy *iphy,
+	struct isci_port *iport,
+	u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+	struct isci_phy *iphy,
+	struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+	struct isci_phy *iphy,
+	struct scu_transport_layer_registers __iomem *transport_layer_registers,
+	struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+	struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+	struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+	struct isci_phy *iphy);
+
+void sci_phy_resume(
+	struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+	struct isci_phy *iphy,
+	u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+	struct isci_phy *iphy,
+	u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+	struct isci_phy *iphy,
+	u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+	struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+	struct isci_phy *iphy,
+	struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+	struct isci_phy *iphy,
+	struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+	struct isci_phy *iphy,
+	struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 0000000..13098b0
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1770 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
+#define SCU_DUMMY_INDEX    (0xFFFF)
+
+#undef C
+#define C(a) (#a)
+const char *port_state_name(enum sci_port_states state)
+{
+	static const char * const strings[] = PORT_STATES;
+
+	return strings[state];
+}
+#undef C
+
+static struct device *sciport_to_dev(struct isci_port *iport)
+{
+	int i = iport->physical_port_index;
+	struct isci_port *table;
+	struct isci_host *ihost;
+
+	if (i == SCIC_SDS_DUMMY_PORT)
+		i = SCI_MAX_PORTS+1;
+
+	table = iport - i;
+	ihost = container_of(table, typeof(*ihost), ports[0]);
+
+	return &ihost->pdev->dev;
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+	u8 index;
+
+	proto->all = 0;
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		struct isci_phy *iphy = iport->phy_table[index];
+
+		if (!iphy)
+			continue;
+		sci_phy_get_protocols(iphy, proto);
+	}
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+	u32 index;
+	u32 mask;
+
+	mask = 0;
+	for (index = 0; index < SCI_MAX_PHYS; index++)
+		if (iport->phy_table[index])
+			mask |= (1 << index);
+
+	return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ *    regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the
+ *    physical index.
+ * @prop: This parameter specifies the properties structure into which to
+ *    copy the requested information.
+ *
+ * Return: Indicates if the user specified a valid port.  SCI_SUCCESS is
+ * returned if the specified port was valid.  SCI_FAILURE_INVALID_PORT is
+ * returned if the specified port is not valid; when this value is returned,
+ * no data is copied to the properties output parameter.
+ */
+enum sci_status sci_port_get_properties(struct isci_port *iport,
+						struct sci_port_properties *prop)
+{
+	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+		return SCI_FAILURE_INVALID_PORT;
+
+	prop->index = iport->logical_port_index;
+	prop->phy_mask = sci_port_get_phys(iport);
+	sci_port_get_sas_address(iport, &prop->local.sas_address);
+	sci_port_get_protocols(iport, &prop->local.protocols);
+	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+	return SCI_SUCCESS;
+}
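+
+/*
+ * Usage sketch (mirroring isci_port_link_up() below): callers can pull
+ * the attached SAS address for a direct-attached SATA device out of the
+ * returned properties:
+ *
+ *	struct sci_port_properties props;
+ *	u64 addr;
+ *
+ *	sci_port_get_properties(iport, &props);
+ *	addr = ((u64)props.remote.sas_address.high << 32) |
+ *	       props.remote.sas_address.low;
+ */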
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+	struct isci_phy *iphy;
+	u32 val;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+		iphy = iport->phy_table[i];
+		if (!iphy)
+			continue;
+		val = readl(&iphy->link_layer_registers->link_layer_control);
+		/* the bit is write-1-to-clear: writing the value back clears it */
+		writel(val, &iphy->link_layer_registers->link_layer_control);
+	}
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+					 struct isci_port *iport,
+					 struct isci_phy *iphy)
+{
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_phy = %p, sas_phy = %p\n",
+		__func__, iphy, &iphy->sas_phy);
+
+	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
+	sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+			      struct isci_port *iport,
+			      struct isci_phy *iphy)
+{
+	unsigned long flags;
+	struct sci_port_properties properties;
+	unsigned long success = true;
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: isci_port = %p\n",
+		__func__, iport);
+
+	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+	sci_port_get_properties(iport, &properties);
+
+	if (iphy->protocol == SAS_PROTOCOL_SATA) {
+		u64 attached_sas_address;
+
+		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+		/*
+		 * For direct-attached SATA devices, the SCI core will
+		 * automagically assign a SAS address to the end device
+		 * for the purpose of creating a port. This SAS address
+		 * will not be the same as assigned to the PHY and needs
+		 * to be obtained from struct sci_port_properties properties.
+		 */
+		attached_sas_address = properties.remote.sas_address.high;
+		attached_sas_address <<= 32;
+		attached_sas_address |= properties.remote.sas_address.low;
+		swab64s(&attached_sas_address);
+
+		memcpy(&iphy->sas_phy.attached_sas_addr,
+		       &attached_sas_address, sizeof(attached_sas_address));
+	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
+		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+		/* Copy the attached SAS address from the IAF */
+		memcpy(iphy->sas_phy.attached_sas_addr,
+		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+	} else {
+		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+		success = false;
+	}
+
+	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+	/* Notify libsas that we have an address frame, if indeed
+	 * we've found an SSP, SMP, or STP target */
+	if (success)
+		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+						    PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ *    becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_phy: This parameter specifies the isci phy whose link went down.
+ * @isci_port: This parameter specifies the isci port affected by the link down.
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+				struct isci_phy *isci_phy,
+				struct isci_port *isci_port)
+{
+	struct isci_remote_device *isci_device;
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: isci_port = %p\n", __func__, isci_port);
+
+	if (isci_port) {
+
+		/* check to see if this is the last phy on this port. */
+		if (isci_phy->sas_phy.port &&
+		    isci_phy->sas_phy.port->num_phys == 1) {
+			/* change the state for all devices on this port.  The
+			 * next task sent to this device will be returned as
+			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+			 * remove the target
+			 */
+			list_for_each_entry(isci_device,
+					    &isci_port->remote_dev_list,
+					    node) {
+				dev_dbg(&isci_host->pdev->dev,
+					"%s: isci_device = %p\n",
+					__func__, isci_device);
+				set_bit(IDEV_GONE, &isci_device->flags);
+			}
+		}
+	}
+
+	/* Notify libsas of the broken link, this will trigger calls to our
+	 * isci_port_deformed and isci_dev_gone functions.
+	 */
+	sas_phy_disconnected(&isci_phy->sas_phy);
+	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+					   PHYE_LOSS_OF_SIGNAL);
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+	switch (state) {
+	case SCI_PORT_READY:
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+	case SCI_PORT_SUB_CONFIGURING:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+				      enum sci_port_states state)
+{
+	struct sci_base_state_machine *sm = &iport->sm;
+	enum sci_port_states old_state = sm->current_state_id;
+
+	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+		iport->ready_exit = true;
+
+	sci_change_state(sm, state);
+	iport->ready_exit = false;
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ *    when the hard reset complete notification has been received.
+ * @isci_port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ *    process.
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+					  enum sci_status completion_status)
+{
+	struct isci_host *ihost = isci_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_port = %p, completion_status=%x\n",
+		     __func__, isci_port, completion_status);
+
+	/* Save the status of the hard reset from the port. */
+	isci_port->hard_reset_status = completion_status;
+
+	if (completion_status != SCI_SUCCESS) {
+
+		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
+		if (isci_port->active_phy_mask == 0) {
+			int phy_idx = isci_port->last_active_phy;
+			struct isci_phy *iphy = &ihost->phys[phy_idx];
+
+			/* Generate the link down now to the host, since it
+			 * was intercepted by the hard reset state machine when
+			 * it really happened.
+			 */
+			isci_port_link_down(ihost, iphy, isci_port);
+		}
+		/* Advance the port state so that link state changes will be
+		 * noticed.
+		 */
+		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+	}
+	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
+	wake_up(&ihost->eventq);
+
+}
+
+/* This method will return a true value if the specified phy can be assigned
+ * to this port.  The following is the list of phys allowed for each port:
+ *
+ *	Port 0 - phys 3, 2, 1, 0
+ *	Port 1 - phy  1
+ *	Port 2 - phys 3, 2
+ *	Port 3 - phy  3
+ *
+ * This method doesn't preclude all configurations.  It merely ensures that
+ * a phy is part of the allowable set of phy identifiers for that port.  For
+ * example, one could assign phy 3 to port 0 and no other phys.  Please refer
+ * to sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported.
+ *
+ * Return: true if this is a valid phy assignment for the port; false if it
+ * is not.
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	struct sci_user_parameters *user = &ihost->user_parameters;
+
+	/* Initialize to invalid value. */
+	u32 existing_phy_index = SCI_MAX_PHYS;
+	u32 index;
+
+	if ((iport->physical_port_index == 1) && (phy_index != 1))
+		return false;
+
+	if (iport->physical_port_index == 3 && phy_index != 3)
+		return false;
+
+	if (iport->physical_port_index == 2 &&
+	    (phy_index == 0 || phy_index == 1))
+		return false;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++)
+		if (iport->phy_table[index] && index != phy_index)
+			existing_phy_index = index;
+
+	/* Ensure that all of the phys in the port are capable of
+	 * operating at the same maximum link rate.
+	 */
+	if (existing_phy_index < SCI_MAX_PHYS &&
+	    user->phys[phy_index].max_speed_generation !=
+	    user->phys[existing_phy_index].max_speed_generation)
+		return false;
+
+	return true;
+}
+
+/**
+ * sci_port_is_phy_mask_valid - check if a phy mask is supportable
+ * @iport: This is the port object for which to determine if the phy mask
+ *    can be supported.
+ * @phy_mask: the candidate mask of phys for this port.
+ *
+ * This method will return a true value if the port's phy mask can be
+ * supported by the SCU.  The following is the list of valid PHY mask
+ * configurations for each port (brackets denote optional wider sets):
+ *
+ *	Port 0 - [[3 2] 1] 0
+ *	Port 1 -        [1]
+ *	Port 2 -    [[3] 2]
+ *	Port 3 -        [3]
+ *
+ * Return: true if the phy mask can be supported by this port; false if it
+ * cannot.
+ */
+static bool sci_port_is_phy_mask_valid(
+	struct isci_port *iport,
+	u32 phy_mask)
+{
+	if (iport->physical_port_index == 0) {
+		if (((phy_mask & 0x0F) == 0x0F)
+		    || ((phy_mask & 0x03) == 0x03)
+		    || ((phy_mask & 0x01) == 0x01)
+		    || (phy_mask == 0))
+			return true;
+	} else if (iport->physical_port_index == 1) {
+		if (((phy_mask & 0x02) == 0x02)
+		    || (phy_mask == 0))
+			return true;
+	} else if (iport->physical_port_index == 2) {
+		if (((phy_mask & 0x0C) == 0x0C)
+		    || ((phy_mask & 0x04) == 0x04)
+		    || (phy_mask == 0))
+			return true;
+	} else if (iport->physical_port_index == 3) {
+		if (((phy_mask & 0x08) == 0x08)
+		    || (phy_mask == 0))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port.  Currently, the lowest order phy that is connected is returned.
+ * This method returns a pointer to a struct isci_phy object.  NULL is
+ * returned if there are no currently active (i.e. connected to a remote end
+ * point) phys contained in the port.  All other values specify a struct
+ * isci_phy object that is active in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+	u32 index;
+	struct isci_phy *iphy;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		/* Ensure that the phy is both part of the port and currently
+		 * connected to the remote end-point.
+		 */
+		iphy = iport->phy_table[index];
+		if (iphy && sci_port_active_phy(iport, iphy))
+			return iphy;
+	}
+
+	return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+	/* Check to see if we can add this phy to the port.  This means that
+	 * the phy is not already part of a port and that the port does not
+	 * already have a phy assigned to this phy index.
+	 */
+	if (!iport->phy_table[iphy->phy_index] &&
+	    !phy_get_non_dummy_port(iphy) &&
+	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+		/* Phy is being added in the stopped state so we are in MPC mode
+		 * make logical port index = physical port index
+		 */
+		iport->logical_port_index = iport->physical_port_index;
+		iport->phy_table[iphy->phy_index] = iphy;
+		sci_phy_set_port(iphy, iport);
+
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+	/* Make sure that this phy is part of this port */
+	if (iport->phy_table[iphy->phy_index] == iphy &&
+	    phy_get_non_dummy_port(iphy) == iport) {
+		struct isci_host *ihost = iport->owning_controller;
+
+		/* Yep it is assigned to this port so remove it */
+		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+		iport->phy_table[iphy->phy_index] = NULL;
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+	u32 index;
+
+	sas->high = 0;
+	sas->low  = 0;
+	for (index = 0; index < SCI_MAX_PHYS; index++)
+		if (iport->phy_table[index])
+			sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+	struct isci_phy *iphy;
+
+	/*
+	 * Ensure that the phy is both part of the port and currently
+	 * connected to the remote end-point.
+	 */
+	iphy = sci_port_get_a_connected_phy(iport);
+	if (iphy) {
+		if (iphy->protocol != SAS_PROTOCOL_SATA) {
+			sci_phy_get_attached_sas_address(iphy, sas);
+		} else {
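+			/*
+			 * Direct attached SATA devices do not report a SAS
+			 * address of their own; synthesize a unique one from
+			 * the local phy's address plus the phy index.
+			 */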
+			sci_phy_get_sas_address(iphy, sas);
+			sas->low += iphy->phy_index;
+		}
+	} else {
+		sas->high = 0;
+		sas->low  = 0;
+	}
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ * @iport: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure.
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+	union scu_remote_node_context *rnc;
+
+	rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+	memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+	rnc->ssp.remote_sas_address_hi = 0;
+	rnc->ssp.remote_sas_address_lo = 0;
+
+	rnc->ssp.remote_node_index = rni;
+	rnc->ssp.remote_node_port_width = 1;
+	rnc->ssp.logical_port_index = iport->physical_port_index;
+
+	rnc->ssp.nexus_loss_timer_enable = false;
+	rnc->ssp.check_bit = false;
+	rnc->ssp.is_valid = true;
+	rnc->ssp.is_remote_node_context = true;
+	rnc->ssp.function_number = 0;
+	rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * construct a dummy task context data structure.  This
+ * structure will be posted to the hardware to work around a scheduler error
+ * in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	struct scu_task_context *task_context;
+
+	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+	memset(task_context, 0, sizeof(struct scu_task_context));
+
+	task_context->initiator_request = 1;
+	task_context->connection_rate = 1;
+	task_context->logical_port_index = iport->physical_port_index;
+	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+	task_context->task_index = ISCI_TAG_TCI(tag);
+	task_context->valid = SCU_TASK_CONTEXT_VALID;
+	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+	task_context->remote_node_index = iport->reserved_rni;
+	task_context->do_not_dma_ssp_good_response = 1;
+	task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+
+	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+		isci_free_tag(ihost, iport->reserved_tag);
+
+	if (iport->reserved_rni != SCU_DUMMY_INDEX)
+		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+								     1, iport->reserved_rni);
+
+	iport->reserved_rni = SCU_DUMMY_INDEX;
+	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+	u8 index;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		if (iport->active_phy_mask & (1 << index))
+			sci_phy_setup_transport(iport->phy_table[index], device_id);
+	}
+}
+
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+	sci_phy_resume(iphy);
+	iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+				  struct isci_phy *iphy,
+				  u8 flags)
+{
+	struct isci_host *ihost = iport->owning_controller;
+
+	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
+		sci_phy_resume(iphy);
+
+	iport->active_phy_mask |= 1 << iphy->phy_index;
+
+	sci_controller_clear_invalid_phy(ihost, iphy);
+
+	if (flags & PF_NOTIFY)
+		isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+			     bool do_notify_user)
+{
+	struct isci_host *ihost = iport->owning_controller;
+
+	iport->active_phy_mask &= ~(1 << iphy->phy_index);
+	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
+	if (!iport->active_phy_mask)
+		iport->last_active_phy = iphy->phy_index;
+
+	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+	/* Re-assign the phy back to the LP as if it were a narrow port for APC
+	 * mode. For MPC mode, the phy will remain in the port.
+	 */
+	if (iport->owning_controller->oem_parameters.controller.mode_type ==
+		SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+		writel(iphy->phy_index,
+			&iport->port_pe_configuration_register[iphy->phy_index]);
+
+	if (do_notify_user)
+		isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+	struct isci_host *ihost = iport->owning_controller;
+
+	/*
+	 * Check to see if we have already reported this link as bad and if
+	 * not go ahead and tell the SCI_USER that we have discovered an
+	 * invalid link.
+	 */
+	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+	}
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @iport: the port object which has a phy that has gone link up.
+ * @iphy: the struct isci_phy object that has gone link up.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
+ *
+ * Determine if this phy can be assigned to this port.  If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS address is the
+ * same as that of all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+					     struct isci_phy *iphy,
+					     u8 flags)
+{
+	struct sci_sas_address port_sas_address;
+	struct sci_sas_address phy_sas_address;
+
+	sci_port_get_attached_sas_address(iport, &port_sas_address);
+	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+	/* If the SAS address of the new phy matches the SAS address of
+	 * other phys in the port OR this is the first phy in the port,
+	 * then activate the phy and allow it to be used for operations
+	 * in this port.
+	 */
+	if ((phy_sas_address.high == port_sas_address.high &&
+	     phy_sas_address.low  == port_sas_address.low) ||
+	    iport->active_phy_mask == 0) {
+		struct sci_base_state_machine *sm = &iport->sm;
+
+		sci_port_activate_phy(iport, iphy, flags);
+		if (sm->current_state_id == SCI_PORT_RESETTING)
+			port_state_machine_change(iport, SCI_PORT_READY);
+	} else
+		sci_port_invalid_link_up(iport, iphy);
+}
+
+
+/**
+ * sci_port_is_wide() - determine if this is a wide port
+ * @iport: The port for which the wide port condition is to be checked.
+ *
+ * Return: false if the port has exactly one phy object assigned; true if
+ * there are no phys or more than one phy assigned to the port.
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+	u32 index;
+	u32 phy_count = 0;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		if (iport->phy_table[index] != NULL) {
+			phy_count++;
+		}
+	}
+
+	return phy_count != 1;
+}
+
+/**
+ * sci_port_link_detected() - phy link-detected notification
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
+ *
+ * This method is called by the PHY object when the link is detected.  If the
+ * port wants the PHY to continue on to the link up state then the port
+ * layer must return true.  If the port object returns false the phy object
+ * must halt its attempt to go link up.
+ *
+ * Return: true if the phy can continue on to the link up (ready) state;
+ * false if it can not.  This notification is in place for wide ports and
+ * direct attached phys.  Since there are no wide ported SATA devices this
+ * could become an invalid port configuration.
+ */
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
+{
+	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
+		if (sci_port_is_wide(iport)) {
+			sci_port_invalid_link_up(iport, iphy);
+			return false;
+		} else {
+			struct isci_host *ihost = iport->owning_controller;
+			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+			writel(iphy->phy_index,
+			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
+		}
+	}
+
+	return true;
+}
+
+static void port_timeout(unsigned long data)
+{
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+	struct isci_host *ihost = iport->owning_controller;
+	unsigned long flags;
+	u32 current_state;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	current_state = iport->sm.current_state_id;
+
+	if (current_state == SCI_PORT_RESETTING) {
+		/* if the port is still in the resetting state then the timeout
+		 * fired before the reset completed.
+		 */
+		port_state_machine_change(iport, SCI_PORT_FAILED);
+	} else if (current_state == SCI_PORT_STOPPED) {
+		/* if the port is stopped then the start request failed.  In
+		 * this case stay in the stopped state.
+		 */
+		dev_err(sciport_to_dev(iport),
+			"%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+			__func__,
+			iport);
+	} else if (current_state == SCI_PORT_STOPPING) {
+		dev_dbg(sciport_to_dev(iport),
+			"%s: port%d: stop complete timeout\n",
+			__func__, iport->physical_port_index);
+	} else {
+		/* The port is in the ready state and we have a timer
+		 * reporting a timeout; this should not happen.
+		 */
+		dev_err(sciport_to_dev(iport),
+			"%s: SCIC Port 0x%p is processing a timeout operation "
+			"in state %d.\n", __func__, iport, current_state);
+	}
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * sci_port_update_viit_entry() - update the hardware's VIIT entry
+ * @iport: the port whose VIIT entry is to be updated.
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+	struct sci_sas_address sas_address;
+
+	sci_port_get_sas_address(iport, &sas_address);
+
+	writel(sas_address.high,
+		&iport->viit_registers->initiator_sas_address_hi);
+	writel(sas_address.low,
+		&iport->viit_registers->initiator_sas_address_lo);
+
+	/* This value gets cleared just in case it's not already cleared */
+	writel(0, &iport->viit_registers->reserved);
+
+	/* We are required to update the status register last */
+	writel(SCU_VIIT_ENTRY_ID_VIIT |
+	       SCU_VIIT_IPPT_INITIATOR |
+	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+	       SCU_VIIT_STATUS_ALL_VALID,
+	       &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+	u16 index;
+	struct isci_phy *iphy;
+	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+	/*
+	 * Loop through all of the phys in this port and find the phy with the
+	 * lowest maximum link rate. */
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		iphy = iport->phy_table[index];
+		if (iphy && sci_port_active_phy(iport, iphy) &&
+		    iphy->max_negotiated_speed < max_allowed_speed)
+			max_allowed_speed = iphy->max_negotiated_speed;
+	}
+
+	return max_allowed_speed;
+}
+
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+	u32 pts_control_value;
+
+	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @iport: the port on which to post the dummy task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue causing a starvation problem for currently
+ * ongoing requests.
+ *
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	u16 tag = iport->reserved_tag;
+	struct scu_task_context *tc;
+	u32 command;
+
+	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+	tc->abort = 0;
+
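+	/*
+	 * The post code packs the request type, the destination logical
+	 * port, and the task context index into a single doorbell value.
+	 */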
+	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+		  ISCI_TAG_TCI(tag);
+
+	sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_abort_dummy_request() - abort the dummy/workaround request
+ * @iport: The port on which the task must be aborted.
+ *
+ * This routine will abort the dummy request.  This will allow the hardware
+ * to power down parts of the silicon to save power.
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	u16 tag = iport->reserved_tag;
+	struct scu_task_context *tc;
+	u32 command;
+
+	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+	tc->abort = 1;
+
+	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+		  ISCI_TAG_TCI(tag);
+
+	sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_resume_port_task_scheduler() - resume the port task scheduler
+ * @iport: the struct isci_port object to resume.
+ *
+ * This method will resume the port task scheduler for this port object.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+	u32 pts_control_value;
+
+	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	sci_port_suspend_port_task_scheduler(iport);
+
+	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+	if (iport->active_phy_mask != 0) {
+		/* At least one of the phys on the port is ready */
+		port_state_machine_change(iport,
+					  SCI_PORT_SUB_OPERATIONAL);
+	}
+}
+
+static void scic_sds_port_ready_substate_waiting_exit(
+					struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	sci_port_resume_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+	u32 index;
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	struct isci_host *ihost = iport->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
+		__func__, iport->physical_port_index);
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		if (iport->phy_table[index]) {
+			writel(iport->physical_port_index,
+				&iport->port_pe_configuration_register[
+					iport->phy_table[index]->phy_index]);
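+			/*
+			 * Resume any phy whose active and enabled masks
+			 * disagree, i.e. active phys not yet resumed.
+			 */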
+			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+				sci_port_resume_phy(iport, iport->phy_table[index]);
+		}
+	}
+
+	sci_port_update_viit_entry(iport);
+
+	/*
+	 * Post the dummy task for the port so the hardware can schedule
+	 * io correctly
+	 */
+	sci_port_post_dummy_request(iport);
+}
+
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	u8 phys_index = iport->physical_port_index;
+	union scu_remote_node_context *rnc;
+	u16 rni = iport->reserved_rni;
+	u32 command;
+
+	rnc = &ihost->remote_node_context_table[rni];
+
+	rnc->ssp.is_valid = false;
+
+	/* ensure the preceding tc abort request has reached the
+	 * controller and give it ample time to act before posting the rnc
+	 * invalidate
+	 */
+	readl(&ihost->smu_registers->interrupt_status); /* flush */
+	udelay(10);
+
+	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+	sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_ready_substate_operational_exit() - exit the operational substate
+ * @sm: the state machine, embedded in a struct isci_port object.
+ *
+ * This method will perform the actions required by the struct isci_port on
+ * exiting the SCI_PORT_SUB_OPERATIONAL state.  This function reports the
+ * port not ready and suspends the port task scheduler.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	struct isci_host *ihost = iport->owning_controller;
+
+	/*
+	 * Kill the dummy task for this port; if it has not yet posted,
+	 * the hardware will treat this as a NOP and just return abort
+	 * complete.
+	 */
+	sci_port_abort_dummy_request(iport);
+
+	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+		__func__, iport->physical_port_index);
+
+	if (iport->ready_exit)
+		sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	struct isci_host *ihost = iport->owning_controller;
+
+	if (iport->active_phy_mask == 0) {
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
+
+		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+	} else
+		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	enum sci_status status = SCI_SUCCESS;
+	enum sci_port_states state;
+	u32 phy_mask;
+
+	state = iport->sm.current_state_id;
+	if (state != SCI_PORT_STOPPED) {
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	if (iport->assigned_device_count > 0) {
+		/* TODO This is a start failure operation because
+		 * there are still devices assigned to this port.
+		 * There must be no devices assigned to a port on a
+		 * start operation.
+		 */
+		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+	}
+
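+	/*
+	 * Reserve the dummy remote node context and task context used for
+	 * the hardware scheduler workaround (see sci_port_construct_dummy_rnc
+	 * and sci_port_construct_dummy_task).
+	 */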
+	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+		u16 rni = sci_remote_node_table_allocate_remote_node(
+				&ihost->available_remote_nodes, 1);
+
+		if (rni != SCU_DUMMY_INDEX)
+			sci_port_construct_dummy_rnc(iport, rni);
+		else
+			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+		iport->reserved_rni = rni;
+	}
+
+	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+		u16 tag;
+
+		tag = isci_alloc_tag(ihost);
+		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+		else
+			sci_port_construct_dummy_task(iport, tag);
+		iport->reserved_tag = tag;
+	}
+
+	if (status == SCI_SUCCESS) {
+		phy_mask = sci_port_get_phys(iport);
+
+		/*
+		 * There are one or more phys assigned to this port.  Make sure
+		 * the port's phy mask is in fact legal and supported by the
+		 * silicon.
+		 */
+		if (sci_port_is_phy_mask_valid(iport, phy_mask)) {
+			port_state_machine_change(iport,
+						  SCI_PORT_READY);
+
+			return SCI_SUCCESS;
+		}
+		status = SCI_FAILURE;
+	}
+
+	if (status != SCI_SUCCESS)
+		sci_port_destroy_dummy_resources(iport);
+
+	return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_STOPPED:
+		return SCI_SUCCESS;
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+	case SCI_PORT_SUB_CONFIGURING:
+	case SCI_PORT_RESETTING:
+		port_state_machine_change(iport,
+					  SCI_PORT_STOPPING);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+	enum sci_status status = SCI_FAILURE_INVALID_PHY;
+	struct isci_phy *iphy = NULL;
+	enum sci_port_states state;
+	u32 phy_index;
+
+	state = iport->sm.current_state_id;
+	if (state != SCI_PORT_SUB_OPERATIONAL) {
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	/* Select a phy on which we can send the hard reset request. */
+	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+		iphy = iport->phy_table[phy_index];
+		if (iphy && !sci_port_active_phy(iport, iphy)) {
+			/*
+			 * We found a phy but it is not ready; select a
+			 * different phy.
+			 */
+			iphy = NULL;
+		}
+	}
+
+	/* If no connected phy was found, fail the reset request */
+	if (!iphy)
+		return status;
+	status = sci_phy_reset(iphy);
+
+	if (status != SCI_SUCCESS)
+		return status;
+
+	sci_mod_timer(&iport->timer, timeout);
+	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+	port_state_machine_change(iport, SCI_PORT_RESETTING);
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() - add a PHY to the port
+ * @iport: This parameter specifies the port to which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port.  It returns an enum
+ * sci_status: SCI_SUCCESS means the phy has been added to the port; any
+ * other status is a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+				      struct isci_phy *iphy)
+{
+	enum sci_status status;
+	enum sci_port_states state;
+
+	sci_port_bcn_enable(iport);
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_STOPPED: {
+		struct sci_sas_address port_sas_address;
+
+		/* Read the port assigned SAS Address if there is one */
+		sci_port_get_sas_address(iport, &port_sas_address);
+
+		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+			struct sci_sas_address phy_sas_address;
+
+			/* Make sure that the PHY SAS Address matches the SAS Address
+			 * for this port
+			 */
+			sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+			if (port_sas_address.high != phy_sas_address.high ||
+			    port_sas_address.low  != phy_sas_address.low)
+				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+		}
+		return sci_port_set_phy(iport, iphy);
+	}
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+		status = sci_port_set_phy(iport, iphy);
+
+		if (status != SCI_SUCCESS)
+			return status;
+
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
+		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+		return status;
+	case SCI_PORT_SUB_CONFIGURING:
+		status = sci_port_set_phy(iport, iphy);
+
+		if (status != SCI_SUCCESS)
+			return status;
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
+
+		/* Re-enter the configuring state since this may be the last phy in
+		 * the port.
+		 */
+		port_state_machine_change(iport,
+					  SCI_PORT_SUB_CONFIGURING);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+/**
+ * sci_port_remove_phy() - remove a PHY from the port
+ * @iport: This parameter specifies the port from which the phy will be
+ *    removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
+ *
+ * This method will remove the PHY from the selected PORT.  It returns an
+ * enum sci_status: SCI_SUCCESS means the phy has been removed from the
+ * port; any other status is a failure to remove the phy from the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+					 struct isci_phy *iphy)
+{
+	enum sci_status status;
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+
+	switch (state) {
+	case SCI_PORT_STOPPED:
+		return sci_port_clear_phy(iport, iphy);
+	case SCI_PORT_SUB_OPERATIONAL:
+		status = sci_port_clear_phy(iport, iphy);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		sci_port_deactivate_phy(iport, iphy, true);
+		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+		port_state_machine_change(iport,
+					  SCI_PORT_SUB_CONFIGURING);
+		return SCI_SUCCESS;
+	case SCI_PORT_SUB_CONFIGURING:
+		status = sci_port_clear_phy(iport, iphy);
+
+		if (status != SCI_SUCCESS)
+			return status;
+		sci_port_deactivate_phy(iport, iphy, true);
+
+		/* Re-enter the configuring state since this may be the last phy in
+		 * the port
+		 */
+		port_state_machine_change(iport,
+					  SCI_PORT_SUB_CONFIGURING);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+				      struct isci_phy *iphy)
+{
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_SUB_WAITING:
+		/* Since this is the first phy going link up for the port we
+		 * can just enable it and continue
+		 */
+		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
+
+		port_state_machine_change(iport,
+					  SCI_PORT_SUB_OPERATIONAL);
+		return SCI_SUCCESS;
+	case SCI_PORT_SUB_OPERATIONAL:
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
+		return SCI_SUCCESS;
+	case SCI_PORT_RESETTING:
+		/* TODO We should make sure that the phy that has gone link
+		 * up is the same one on which we sent the reset.  It is
+		 * possible that the phy on which we sent the reset is not
+		 * the one that has gone link up and we want to make sure
+		 * that the phy being reset comes back.  Consider the case
+		 * where a reset is sent but before the hardware processes
+		 * the reset it gets a link up on the port because of a hot
+		 * plug event.  Because of the reset request this phy will
+		 * go link down almost immediately.
+		 */
+
+		/* In the resetting state we don't notify the user regarding
+		 * link up and link down notifications.
+		 */
+		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+					struct isci_phy *iphy)
+{
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_SUB_OPERATIONAL:
+		sci_port_deactivate_phy(iport, iphy, true);
+
+		/* If there are no active phys left in the port, then
+		 * transition the port to the WAITING state until such time
+		 * as a phy goes link up
+		 */
+		if (iport->active_phy_mask == 0)
+			port_state_machine_change(iport,
+						  SCI_PORT_SUB_WAITING);
+		return SCI_SUCCESS;
+	case SCI_PORT_RESETTING:
+		/* In the resetting state we don't notify the user regarding
+		 * link up and link down notifications. */
+		sci_port_deactivate_phy(iport, iphy, false);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+				  struct isci_remote_device *idev,
+				  struct isci_request *ireq)
+{
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_SUB_WAITING:
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_PORT_SUB_OPERATIONAL:
+		iport->started_request_count++;
+		return SCI_SUCCESS;
+	default:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+				     struct isci_remote_device *idev,
+				     struct isci_request *ireq)
+{
+	enum sci_port_states state;
+
+	state = iport->sm.current_state_id;
+	switch (state) {
+	case SCI_PORT_STOPPED:
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_PORT_STOPPING:
+		sci_port_decrement_request_count(iport);
+
+		if (iport->started_request_count == 0)
+			port_state_machine_change(iport,
+						  SCI_PORT_STOPPED);
+		break;
+	case SCI_PORT_READY:
+	case SCI_PORT_RESETTING:
+	case SCI_PORT_FAILED:
+	case SCI_PORT_SUB_WAITING:
+	case SCI_PORT_SUB_OPERATIONAL:
+		sci_port_decrement_request_count(iport);
+		break;
+	case SCI_PORT_SUB_CONFIGURING:
+		sci_port_decrement_request_count(iport);
+		if (iport->started_request_count == 0) {
+			port_state_machine_change(iport,
+						  SCI_PORT_SUB_OPERATIONAL);
+		}
+		break;
+	}
+	return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+	u32 pts_control_value;
+
+	/* enable the port task scheduler in a suspended state */
+	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+	u32 pts_control_value;
+
+	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+	pts_control_value &=
+		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+	struct isci_host *ihost = iport->owning_controller;
+	u8 phys_index = iport->physical_port_index;
+	union scu_remote_node_context *rnc;
+	u16 rni = iport->reserved_rni;
+	u32 command;
+
+	rnc = &ihost->remote_node_context_table[rni];
+	rnc->ssp.is_valid = true;
+
+	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+	sci_controller_post_request(ihost, command);
+
+	/* ensure hardware has seen the post rnc command and give it
+	 * ample time to act before sending the suspend
+	 */
+	readl(&ihost->smu_registers->interrupt_status); /* flush */
+	udelay(10);
+
+	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+	sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+		/*
+		 * If we enter this state because of a request to stop
+		 * the port then we want to disable the hardware's port
+		 * task scheduler.
+		 */
+		sci_port_disable_port_task_scheduler(iport);
+	}
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	/* Enable and suspend the port task scheduler */
+	sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	struct isci_host *ihost = iport->owning_controller;
+	u32 prev_state;
+
+	prev_state = iport->sm.previous_state_id;
+	if (prev_state  == SCI_PORT_RESETTING)
+		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+	else
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
+
+	/* Post and suspend the dummy remote node context for this port. */
+	sci_port_post_dummy_remote_node(iport);
+
+	/* Start the ready substate machine */
+	port_state_machine_change(iport,
+				  SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	sci_del_timer(&iport->timer);
+
+	sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+	int phy_index;
+	u32 phy_mask = iport->active_phy_mask;
+
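+	/*
+	 * hang_detect_users is a reference count of requesters, so hang
+	 * detection is only disabled once the last user releases it.
+	 */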
+	if (timeout)
+		++iport->hang_detect_users;
+	else if (iport->hang_detect_users > 1)
+		--iport->hang_detect_users;
+	else
+		iport->hang_detect_users = 0;
+
+	if (timeout || (iport->hang_detect_users == 0)) {
+		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+			if ((phy_mask >> phy_index) & 1) {
+				writel(timeout,
+				       &iport->phy_table[phy_index]
+					  ->link_layer_registers
+					  ->link_layer_hang_detection_timeout);
+			}
+		}
+	}
+}
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+	[SCI_PORT_STOPPED] = {
+		.enter_state = sci_port_stopped_state_enter,
+		.exit_state  = sci_port_stopped_state_exit
+	},
+	[SCI_PORT_STOPPING] = {
+		.exit_state  = sci_port_stopping_state_exit
+	},
+	[SCI_PORT_READY] = {
+		.enter_state = sci_port_ready_state_enter,
+	},
+	[SCI_PORT_SUB_WAITING] = {
+		.enter_state = sci_port_ready_substate_waiting_enter,
+		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
+	},
+	[SCI_PORT_SUB_OPERATIONAL] = {
+		.enter_state = sci_port_ready_substate_operational_enter,
+		.exit_state  = sci_port_ready_substate_operational_exit
+	},
+	[SCI_PORT_SUB_CONFIGURING] = {
+		.enter_state = sci_port_ready_substate_configuring_enter
+	},
+	[SCI_PORT_RESETTING] = {
+		.exit_state  = sci_port_resetting_state_exit
+	},
+	[SCI_PORT_FAILED] = {
+		.enter_state = sci_port_failed_state_enter,
+	}
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+			     struct isci_host *ihost)
+{
+	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
+	iport->physical_port_index = index;
+	iport->active_phy_mask     = 0;
+	iport->enabled_phy_mask    = 0;
+	iport->last_active_phy     = 0;
+	iport->ready_exit	   = false;
+
+	iport->owning_controller = ihost;
+
+	iport->started_request_count = 0;
+	iport->assigned_device_count = 0;
+	iport->hang_detect_users = 0;
+
+	iport->reserved_rni = SCU_DUMMY_INDEX;
+	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+	sci_init_timer(&iport->timer, port_timeout);
+
+	iport->port_task_scheduler_registers = NULL;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++)
+		iport->phy_table[index] = NULL;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+	struct isci_host *ihost = iport->owning_controller;
+
+	/* notify the user. */
+	isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
+{
+	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+				 struct isci_phy *iphy)
+{
+	unsigned long flags;
+	enum sci_status status;
+	int ret = TMF_RESP_FUNC_COMPLETE;
+
+	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+		__func__, iport);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	set_bit(IPORT_RESET_PENDING, &iport->state);
+
+	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (status == SCI_SUCCESS) {
+		wait_port_reset(ihost, iport);
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: iport = %p; hard reset completion\n",
+			__func__, iport);
+
+		if (iport->hard_reset_status != SCI_SUCCESS) {
+			ret = TMF_RESP_FUNC_FAILED;
+
+			dev_err(&ihost->pdev->dev,
+				"%s: iport = %p; hard reset failed (0x%x)\n",
+				__func__, iport, iport->hard_reset_status);
+		}
+	} else {
+		clear_bit(IPORT_RESET_PENDING, &iport->state);
+		wake_up(&ihost->eventq);
+		ret = TMF_RESP_FUNC_FAILED;
+
+		dev_err(&ihost->pdev->dev,
+			"%s: iport = %p; sci_port_hard_reset call"
+			" failed 0x%x\n",
+			__func__, iport, status);
+
+	}
+	return ret;
+}
+
+int isci_ata_check_ready(struct domain_device *dev)
+{
+	struct isci_port *iport = dev->port->lldd_port;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (!idev)
+		goto out;
+
+	if (test_bit(IPORT_RESET_PENDING, &iport->state))
+		goto out;
+
+	rc = !!iport->active_phy_mask;
+ out:
+	isci_put_device(idev);
+
+	return rc;
+}
+
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_port *iport = phy->port->lldd_port;
+	unsigned long flags;
+	int i;
+
+	/* we got a port notification on a port that was subsequently
+	 * torn down and libsas is just now catching up
+	 */
+	if (!iport)
+		return;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		if (iport->active_phy_mask & 1 << i)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PHYS)
+		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
+			__func__, (long) (iport - &ihost->ports[0]));
+}
+
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_phy *iphy = to_iphy(phy);
+	struct asd_sas_port *port = phy->port;
+	struct isci_port *iport = NULL;
+	unsigned long flags;
+	int i;
+
+	/* initial ports are formed as the driver is still initializing,
+	 * wait for that process to complete
+	 */
+	wait_for_start(ihost);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		iport = &ihost->ports[i];
+		if (iport->active_phy_mask & 1 << iphy->phy_index)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PORTS)
+		iport = NULL;
+
+	port->lldd_port = iport;
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 0000000..861e8f7
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT   0xFF
+
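+/* flag bits passed to sci_port_activate_phy() and
+ * sci_port_general_link_up_handler()
+ */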
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+	isci_freed        = 0x00,
+	isci_starting     = 0x01,
+	isci_ready        = 0x02,
+	isci_ready_for_io = 0x03,
+	isci_stopping     = 0x04,
+	isci_stopped      = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ *              need to take extra port-teardown actions that are
+ *              skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of the phys in the port
+ *                    that have already been resumed
+ * @reserved_tag: io tag reserved for the port task scheduler workaround
+ * @reserved_rni: remote node index reserved for the port task scheduler
+ *                workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and notified
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+	struct isci_host *isci_host;
+	struct list_head remote_dev_list;
+	#define IPORT_RESET_PENDING 0
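+	/* IPORT_RESET_PENDING is set in @state while a hard reset is in flight */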
+	unsigned long state;
+	enum sci_status hard_reset_status;
+	struct sci_base_state_machine sm;
+	bool ready_exit;
+	u8 logical_port_index;
+	u8 physical_port_index;
+	u8 active_phy_mask;
+	u8 enabled_phy_mask;
+	u8 last_active_phy;
+	u16 reserved_rni;
+	u16 reserved_tag;
+	u32 started_request_count;
+	u32 assigned_device_count;
+	u32 hang_detect_users;
+	u32 not_ready_reason;
+	struct isci_phy *phy_table[SCI_MAX_PHYS];
+	struct isci_host *owning_controller;
+	struct sci_timer timer;
+	struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+	/* XXX rework: only one register, no need to replicate per-port */
+	u32 __iomem *port_pe_configuration_register;
+	struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+	SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+	SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+	SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+	SCIC_PORT_NOT_READY_RECONFIGURING,
+
+	SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+	struct sci_sas_address sas_address;
+	struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+	u32 index;
+	struct sci_port_end_point_properties local;
+	struct sci_port_end_point_properties remote;
+	u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - port state machine states
+ * @SCI_PORT_STOPPED: port has successfully been stopped.  In this state
+ *		      no new IO operations are permitted.  This state is
+ *		      entered from the STOPPING state.
+ * @SCI_PORT_STOPPING: port is in the process of stopping.  In this
+ *		       state no new IO operations are permitted, but
+ *		       existing IO operations are allowed to complete.
+ *		       This state is entered from the READY state.
+ * @SCI_PORT_READY: port is now ready.  Thus, the user is able to
+ *		    perform IO operations on this port. This state is
+ *		    entered from the STARTING state.
+ * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
+ *			  phys.
+ * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
+ *			      least one phy operational.
+ * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
+ *			      add/remove phy event.  This state is only
+ *			      used in Automatic Port Configuration Mode
+ *			      (APC)
+ * @SCI_PORT_RESETTING: port is in the process of performing a hard
+ *			reset.  Thus, the user is unable to perform IO
+ *			operations on this port.  This state is entered
+ *			from the READY state.
+ * @SCI_PORT_FAILED: port has failed a reset request.  This state is
+ *		     entered when a port reset request times out. This
+ *		     state is entered from the RESETTING state.
+ */
+#define PORT_STATES {\
+	C(PORT_STOPPED),\
+	C(PORT_STOPPING),\
+	C(PORT_READY),\
+	C(PORT_SUB_WAITING),\
+	C(PORT_SUB_OPERATIONAL),\
+	C(PORT_SUB_CONFIGURING),\
+	C(PORT_RESETTING),\
+	C(PORT_FAILED),\
+	}
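+/*
+ * PORT_STATES is an x-macro list: redefining C() expands it into the enum
+ * below, and the same list can be reused (e.g. by port_state_name()) to
+ * keep the state names and values in sync.
+ */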
+#undef C
+#define C(a) SCI_##a
+enum sci_port_states PORT_STATES;
+#undef C
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+	if (WARN_ONCE(iport->started_request_count == 0,
+		       "%s: tried to decrement started_request_count past 0!?",
+			__func__))
+		/* pass */;
+	else
+		iport->started_request_count--;
+}
+
+#define sci_port_active_phy(port, phy) \
+	(((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+	struct isci_port *iport,
+	u8 port_index,
+	struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+	struct isci_port *iport,
+	struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+	struct isci_port *iport,
+	struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+	struct isci_port *iport,
+	u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+	struct isci_port *iport,
+	struct isci_phy *iphy,
+	bool do_notify_user);
+
+bool sci_port_link_detected(
+	struct isci_port *iport,
+	struct isci_phy *iphy);
+
+enum sci_status sci_port_get_properties(
+	struct isci_port *iport,
+	struct sci_port_properties *prop);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+				      struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+					struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+	struct isci_port *iport,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+	struct isci_port *iport,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+	struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+	struct isci_port *iport,
+	struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+	struct isci_port *iport,
+	u32 phy_index);
+
+void sci_port_get_sas_address(
+	struct isci_port *iport,
+	struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+	struct isci_port *iport,
+	struct sci_sas_address *sas_address);
+
+void sci_port_set_hang_detection_timeout(
+	struct isci_port *isci_port,
+	u32 timeout);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+				 struct isci_phy *iphy);
+int isci_ata_check_ready(struct domain_device *dev);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 0000000..ac87974
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,760 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (1000)
+
+enum SCIC_SDS_APC_ACTIVITY {
+	SCIC_SDS_APC_SKIP_PHY,
+	SCIC_SDS_APC_ADD_PHY,
+	SCIC_SDS_APC_START_TIMER,
+
+	SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_sas_address_compare() - compare two SAS addresses
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Return: a value > 0 if address one is greater than address two, a value
+ * < 0 if address one is less than address two, and 0 if the two addresses
+ * are identical.
+ */
+static s32 sci_sas_address_compare(
+	struct sci_sas_address address_one,
+	struct sci_sas_address address_two)
+{
+	if (address_one.high > address_two.high) {
+		return 1;
+	} else if (address_one.high < address_two.high) {
+		return -1;
+	} else if (address_one.low > address_two.low) {
+		return 1;
+	} else if (address_one.low < address_two.low) {
+		return -1;
+	}
+
+	/* The two SAS Address must be identical */
+	return 0;
+}
+
+/**
+ * sci_port_configuration_agent_find_port() - find the matching port for a phy
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy.  This means that the
+ * port and phy both have the same broadcast sas address and same received
+ * sas address.
+ *
+ * Return: the address of the matching port, or NULL if there is no matching
+ * port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+	struct isci_host *ihost,
+	struct isci_phy *iphy)
+{
+	u8 i;
+	struct sci_sas_address port_sas_address;
+	struct sci_sas_address port_attached_device_address;
+	struct sci_sas_address phy_sas_address;
+	struct sci_sas_address phy_attached_device_address;
+
+	/*
+	 * Since this phy can be a member of a wide port check to see if one or
+	 * more phys match the sent and received SAS address as this phy in which
+	 * case it should participate in the same port.
+	 */
+	sci_phy_get_sas_address(iphy, &phy_sas_address);
+	sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+
+		sci_port_get_sas_address(iport, &port_sas_address);
+		sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+		if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+		    sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+			return iport;
+	}
+
+	return NULL;
+}
+
+/**
+ * sci_port_configuration_agent_validate_ports() - validate the port config
+ * @ihost: This is the controller object that contains the port agent.
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine will validate that the port configuration is correct for the
+ * SCU hardware.  The SCU hardware allows the following port configurations:
+ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *   LP1 -> (PE1)
+ *   LP2 -> (PE2), (PE2, PE3)
+ *   LP3 -> (PE3)
+ *
+ * Return: SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent; SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it is
+ * not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+	struct isci_host *ihost,
+	struct sci_port_configuration_agent *port_agent)
+{
+	struct sci_sas_address first_address;
+	struct sci_sas_address second_address;
+
+	/*
+	 * Sanity check the max ranges for all the phys; the max index
+	 * is always equal to the port range index. */
+	if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+	    port_agent->phy_valid_port_range[1].max_index != 1 ||
+	    port_agent->phy_valid_port_range[2].max_index != 2 ||
+	    port_agent->phy_valid_port_range[3].max_index != 3)
+		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+	/*
+	 * This is a request to configure a single x4 port or at least attempt
+	 * to make all the phys into a single port */
+	if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+	    port_agent->phy_valid_port_range[1].min_index == 0 &&
+	    port_agent->phy_valid_port_range[2].min_index == 0 &&
+	    port_agent->phy_valid_port_range[3].min_index == 0)
+		return SCI_SUCCESS;
+
+	/*
+	 * This is a degenerate case where phy 1 and phy 2 are assigned
+	 * to the same port; this is explicitly disallowed by the hardware
+	 * unless they are part of the same x4 port and this condition was
+	 * already checked above. */
+	if (port_agent->phy_valid_port_range[2].min_index == 1) {
+		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+	}
+
+	/*
+	 * PE0 and PE3 can never have the same SAS Address unless they
+	 * are part of the same x4 wide port and we have already checked
+	 * for this condition. */
+	sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+	sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+	if (sci_sas_address_compare(first_address, second_address) == 0) {
+		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+	}
+
+	/*
+	 * If PE0 and PE1 are configured into two x1 ports then make sure
+	 * that the SAS addresses for PE0 and PE2 are different, since they
+	 * can not be part of the same port. */
+	if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+	    port_agent->phy_valid_port_range[1].min_index == 1) {
+		sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+		sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+		if (sci_sas_address_compare(first_address, second_address) == 0) {
+			return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+		}
+	}
+
+	/*
+	 * If PE2 and PE3 are configured into two x1 ports then make sure
+	 * that the SAS addresses for PE1 and PE3 are different, since they
+	 * can not be part of the same port. */
+	if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+	    port_agent->phy_valid_port_range[3].min_index == 3) {
+		sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+		sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+		if (sci_sas_address_compare(first_address, second_address) == 0) {
+			return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+		}
+	}
+
+	return SCI_SUCCESS;
+}
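+
+/*
+ * For example (illustrative values): a valid 2x2 configuration (PE0/PE1 in
+ * LP0, PE2/PE3 in LP2) yields phy_valid_port_range[] =
+ * { {0, 0}, {0, 1}, {2, 2}, {2, 3} } ({min_index, max_index} per phy) and
+ * passes the checks above, whereas pairing PE1 with PE2 would set
+ * phy_valid_port_range[2] to {1, 2} and be rejected by the
+ * min_index == 1 check.
+ */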
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+					      struct sci_port_configuration_agent *port_agent)
+{
+	u32 phy_mask;
+	u32 assigned_phy_mask;
+	struct sci_sas_address sas_address;
+	struct sci_sas_address phy_assigned_address;
+	u8 port_index;
+	u8 phy_index;
+
+	assigned_phy_mask = 0;
+	sas_address.high = 0;
+	sas_address.low = 0;
+
+	for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+		phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+		if (!phy_mask)
+			continue;
+		/*
+		 * Make sure that one or more of the phys were not already
+		 * assigned to a different port. */
+		if ((phy_mask & ~assigned_phy_mask) == 0) {
+			return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+		}
+
+		/* Find the starting phy index for this round through the loop */
+		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+			if ((phy_mask & (1 << phy_index)) == 0)
+				continue;
+			sci_phy_get_sas_address(&ihost->phys[phy_index],
+						     &sas_address);
+
+			/*
+			 * The phy_index can be used as the starting point for the
+			 * port range since the hardware numbers logical ports the
+			 * same as the PE index. */
+			port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+			port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+			if (phy_index != port_index) {
+				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+			}
+
+			break;
+		}
+
+		/*
+		 * See how many additional phys are being added to this logical port.
+		 * Note: We have not moved the current phy_index so we will actually
+		 *       compare the starting phy with itself.
+		 *       This is expected and required to add the phy to the port. */
+		while (phy_index < SCI_MAX_PHYS) {
+			if ((phy_mask & (1 << phy_index)) == 0) {
+				/* skip phys that are not assigned to this port */
+				phy_index++;
+				continue;
+			}
+			sci_phy_get_sas_address(&ihost->phys[phy_index],
+						     &phy_assigned_address);
+
+			if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+				/*
+				 * The phy mask specified that this phy is part of the same
+				 * port as the starting phy, but its SAS address differs, so
+				 * fail this configuration. */
+				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+			}
+
+			port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+			port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+			sci_port_add_phy(&ihost->ports[port_index],
+					      &ihost->phys[phy_index]);
+
+			assigned_phy_mask |= (1 << phy_index);
+			phy_index++;
+		}
+
+	}
+
+	return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+	u8 index;
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct sci_port_configuration_agent *port_agent;
+	struct isci_host *ihost;
+	unsigned long flags;
+	u16 configure_phy_mask;
+
+	port_agent = container_of(tmr, typeof(*port_agent), timer);
+	ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	port_agent->timer_pending = false;
+
+	/* Find the mask of phys that are reported ready but not yet configured into a port */
+	configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		struct isci_phy *iphy = &ihost->phys[index];
+
+		if (configure_phy_mask & (1 << index)) {
+			port_agent->link_up_handler(ihost, port_agent,
+						    phy_get_non_dummy_port(iphy),
+						    iphy);
+		}
+	}
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+				       struct sci_port_configuration_agent *port_agent,
+				       struct isci_port *iport,
+				       struct isci_phy *iphy)
+{
+	/* If the port is NULL then the phy was not assigned to a port.
+	 * This is because the phy was not given the same SAS Address as
+	 * the other PHYs in the port.
+	 */
+	if (!iport)
+		return;
+
+	port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+	sci_port_link_up(iport, iphy);
+	if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+		port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.  The port is an invalid handle only if
+ *    the phy was never part of this port; this happens when the phy is not
+ *    broadcasting the same SAS address as the other phys in the assigned
+ *    port.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down
+ * notifications.  Since all ports and phys are associated at initialization
+ * time we just turn around and notify the port object of the link down
+ * event.  If this phy is not associated with a port no action is taken.
+ */
+static void sci_mpc_agent_link_down(
+	struct isci_host *ihost,
+	struct sci_port_configuration_agent *port_agent,
+	struct isci_port *iport,
+	struct isci_phy *iphy)
+{
+	if (iport != NULL) {
+		/*
+		 * If we can form a new port from the remainder of the phys
+		 * then we want to start the timer to allow the SCI User to
+		 * cleanup old devices and rediscover the port before
+		 * rebuilding the port with the phys that remain in the ready
+		 * state.
+		 */
+		port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+		port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+		/*
+		 * Check to see if there are more phys waiting to be
+		 * configured into a port. If there are allow the SCI User
+		 * to tear down this port, if necessary, and then reconstruct
+		 * the port after the timeout.
+		 */
+		if ((port_agent->phy_configured_mask == 0x0000) &&
+		    (port_agent->phy_ready_mask != 0x0000) &&
+		    !port_agent->timer_pending) {
+			port_agent->timer_pending = true;
+
+			sci_mod_timer(&port_agent->timer,
+				      SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+		}
+
+		sci_port_link_down(iport, iphy);
+	}
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+					      struct sci_port_configuration_agent *port_agent)
+{
+	u8 phy_index;
+	u8 port_index;
+	struct sci_sas_address sas_address;
+	struct sci_sas_address phy_assigned_address;
+
+	phy_index = 0;
+
+	while (phy_index < SCI_MAX_PHYS) {
+		port_index = phy_index;
+
+		/* Get the assigned SAS address for the current starting phy. */
+		sci_phy_get_sas_address(&ihost->phys[phy_index],
+					    &sas_address);
+
+		while (++phy_index < SCI_MAX_PHYS) {
+			sci_phy_get_sas_address(&ihost->phys[phy_index],
+						     &phy_assigned_address);
+
+			/* Verify that the SAS address is the same for every PHY */
+			if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+				port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+				port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+			} else {
+				port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+				port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+				break;
+			}
+		}
+	}
+
+	return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we can not yet tell to which a phy
+ * belongs.
+ */
+static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
+				      u32 timeout)
+{
+	port_agent->timer_pending = true;
+	sci_mod_timer(&port_agent->timer, timeout);
+}
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+					       struct sci_port_configuration_agent *port_agent,
+					       struct isci_phy *iphy,
+					       bool start_timer)
+{
+	u8 port_index;
+	enum sci_status status;
+	struct isci_port *iport;
+	enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+	iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+	if (iport) {
+		if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+			apc_activity = SCIC_SDS_APC_ADD_PHY;
+		else
+			apc_activity = SCIC_SDS_APC_SKIP_PHY;
+	} else {
+		/*
+		 * There is no matching port for this phy, so let's search through
+		 * the ports and see if we can add the phy to its own port, or maybe
+		 * start the timer and wait to see if a wider port can be made.
+		 *
+		 * Note the break when we reach the condition where port id == phy id. */
+		for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+		     port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+		     port_index++) {
+
+			iport = &ihost->ports[port_index];
+
+			/* First we must make sure that this PHY can be added to this Port. */
+			if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+				/*
+				 * Port contains a PHY with a greater PHY ID than the current
+				 * PHY that has gone link up.  This phy can not be part of any
+				 * port so skip it and move on. */
+				if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+					apc_activity = SCIC_SDS_APC_SKIP_PHY;
+					break;
+				}
+
+				/*
+				 * We have reached the end of our Port list and have not found
+				 * any reason why we should not either add the PHY to the port
+				 * or wait for more phys to become active. */
+				if (iport->physical_port_index == iphy->phy_index) {
+					/*
+					 * The port either has no active phys or has active
+					 * phys with a lower phy id than this phy; if it had
+					 * an active phy with a higher id we would have
+					 * exited the loop above. */
+					if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+						apc_activity = SCIC_SDS_APC_ADD_PHY;
+					}
+
+					break;
+				}
+
+				/*
+				 * The current port has no active phys and this phy could be
+				 * part of this port.  Since we don't know yet, set up to start
+				 * the timer and see if there is a better configuration. */
+				if (iport->active_phy_mask == 0) {
+					apc_activity = SCIC_SDS_APC_START_TIMER;
+				}
+			} else if (iport->active_phy_mask != 0) {
+				/*
+				 * The Port has an active phy and the current Phy can not
+				 * participate in this port so skip the PHY and see if
+				 * there is a better configuration. */
+				apc_activity = SCIC_SDS_APC_SKIP_PHY;
+			}
+		}
+	}
+
+	/*
+	 * Check to see if the start timer operation should instead map to an
+	 * add phy operation.  This happens when we have been waiting to add
+	 * a phy to a port but could not because the automatic port
+	 * configuration engine had a choice of possible ports for the phy.
+	 * Since we have gone through a timeout we are going to restrict the
+	 * choice to the smallest possible port. */
+	if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
+		apc_activity = SCIC_SDS_APC_ADD_PHY;
+
+	switch (apc_activity) {
+	case SCIC_SDS_APC_ADD_PHY:
+		status = sci_port_add_phy(iport, iphy);
+
+		if (status == SCI_SUCCESS) {
+			port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+		}
+		break;
+
+	case SCIC_SDS_APC_START_TIMER:
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+		break;
+
+	case SCIC_SDS_APC_SKIP_PHY:
+	default:
+		/* do nothing the PHY can not be made part of a port at this time. */
+		break;
+	}
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: This is the controller object that receives the link up
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications.
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+				       struct sci_port_configuration_agent *port_agent,
+				       struct isci_port *iport,
+				       struct isci_phy *iphy)
+{
+	u8 phy_index  = iphy->phy_index;
+
+	if (!iport) {
+		/* the phy is not yet part of a port */
+		port_agent->phy_ready_mask |= 1 << phy_index;
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+	} else {
+		/* the phy is already part of the port */
+		port_agent->phy_ready_mask |= 1 << phy_index;
+		sci_port_link_up(iport, iphy);
+	}
+}
+
+/**
+ *
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications.  If the phy is not associated with a port no action is
+ * taken.
+ */
+static void sci_apc_agent_link_down(
+	struct isci_host *ihost,
+	struct sci_port_configuration_agent *port_agent,
+	struct isci_port *iport,
+	struct isci_phy *iphy)
+{
+	port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+	if (!iport)
+		return;
+	if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+		enum sci_status status;
+
+		status = sci_port_remove_phy(iport, iphy);
+
+		if (status == SCI_SUCCESS)
+			port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+	}
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+	u32 index;
+	struct sci_timer *tmr = (struct sci_timer *)data;
+	struct sci_port_configuration_agent *port_agent;
+	struct isci_host *ihost;
+	unsigned long flags;
+	u16 configure_phy_mask;
+
+	port_agent = container_of(tmr, typeof(*port_agent), timer);
+	ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (tmr->cancel)
+		goto done;
+
+	port_agent->timer_pending = false;
+
+	configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+	if (!configure_phy_mask)
+		goto done;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		if ((configure_phy_mask & (1 << index)) == 0)
+			continue;
+
+		sci_apc_agent_configure_ports(ihost, port_agent,
+						   &ihost->phys[index], false);
+	}
+
+	if (is_controller_start_complete(ihost))
+		sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+
+done:
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_port_configuration_agent_construct - construct the port agent
+ * @port_agent: This is the port configuration agent to construct.
+ *
+ * This method will construct the port configuration agent for operation.
+ * This call is universal for both manual port configuration and automatic
+ * port configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+	struct sci_port_configuration_agent *port_agent)
+{
+	u32 index;
+
+	port_agent->phy_configured_mask = 0x00;
+	port_agent->phy_ready_mask = 0x00;
+
+	port_agent->link_up_handler = NULL;
+	port_agent->link_down_handler = NULL;
+
+	port_agent->timer_pending = false;
+
+	for (index = 0; index < SCI_MAX_PORTS; index++) {
+		port_agent->phy_valid_port_range[index].min_index = 0;
+		port_agent->phy_valid_port_range[index].max_index = 0;
+	}
+}
+
+bool is_port_config_apc(struct isci_host *ihost)
+{
+	return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+	struct isci_host *ihost,
+	struct sci_port_configuration_agent *port_agent)
+{
+	enum sci_status status;
+	enum sci_port_configuration_mode mode;
+
+	mode = ihost->oem_parameters.controller.mode_type;
+
+	if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+		status = sci_mpc_agent_validate_phy_configuration(
+				ihost, port_agent);
+
+		port_agent->link_up_handler = sci_mpc_agent_link_up;
+		port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+		sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+	} else {
+		status = sci_apc_agent_validate_phy_configuration(
+				ihost, port_agent);
+
+		port_agent->link_up_handler = sci_apc_agent_link_up;
+		port_agent->link_down_handler = sci_apc_agent_link_down;
+
+		sci_init_timer(&port_agent->timer, apc_agent_timeout);
+	}
+
+	return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 0000000..8ac646e
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,230 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
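+/* the EFI variable name ISCI_EFI_VAR_NAME ("RstScuO") as an efi_char16_t string */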
+static efi_char16_t isci_efivar_name[] = {
+	'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+	void __iomem *oprom = pci_map_biosrom(pdev);
+	struct isci_orom *rom = NULL;
+	size_t len, i;
+	int j;
+	char oem_sig[4];
+	struct isci_oem_hdr oem_hdr;
+	u8 *tmp, sum;
+
+	if (!oprom)
+		return NULL;
+
+	len = pci_biosrom_size(pdev);
+	rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+	if (!rom) {
+		dev_warn(&pdev->dev,
+			 "Unable to allocate memory for orom\n");
+		return NULL;
+	}
+
+	for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+		memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+		/* we think we found the OEM table */
+		if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+			size_t copy_len;
+
+			memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+			copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+				       sizeof(*rom));
+
+			memcpy_fromio(rom,
+				      oprom + i + sizeof(oem_hdr),
+				      copy_len);
+
+			/* verify the checksum; the header and table bytes must sum to zero */
+			tmp = (u8 *)&oem_hdr;
+			for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+				sum += *tmp;
+
+			tmp = (u8 *)rom;
+			for (j = 0; j < sizeof(*rom); j++, tmp++)
+				sum += *tmp;
+
+			if (sum != 0) {
+				dev_warn(&pdev->dev,
+					 "OEM table checksum failed\n");
+				continue;
+			}
+
+			/* keep going if that's not the oem param table */
+			if (memcmp(rom->hdr.signature,
+				   ISCI_ROM_SIG,
+				   ISCI_ROM_SIG_SIZE) != 0)
+				continue;
+
+			dev_info(&pdev->dev,
+				 "OEM parameter table found in OROM\n");
+			break;
+		}
+	}
+
+	if (i >= len) {
+		dev_err(&pdev->dev, "oprom parse error\n");
+		rom = NULL;
+	}
+	pci_unmap_biosrom(oprom);
+
+	return rom;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+	struct isci_orom *orom = NULL, *data;
+	int i, j;
+
+	if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+		return NULL;
+
+	if (fw->size < sizeof(*orom))
+		goto out;
+
+	data = (struct isci_orom *)fw->data;
+
+	if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+		    strlen(ISCI_ROM_SIG)) != 0)
+		goto out;
+
+	orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+	if (!orom)
+		goto out;
+
+	memcpy(orom, fw->data, fw->size);
+
+	if (is_c0(pdev) || is_c1(pdev))
+		goto out;
+
+	/*
+	 * deprecated: override default amp_control for pre-production
+	 * silicon revisions
+	 */
+	for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+		for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+			orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+			orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+			orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+			orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+		}
+ out:
+	release_firmware(fw);
+
+	return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+	return &efi;
+#else
+	return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+	efi_status_t status;
+	struct isci_orom *rom;
+	struct isci_oem_hdr *oem_hdr;
+	u8 *tmp, sum;
+	int j;
+	unsigned long data_len;
+	u8 *efi_data;
+	u32 efi_attrib = 0;
+
+	data_len = 1024;
+	efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+	if (!efi_data) {
+		dev_warn(&pdev->dev,
+			 "Unable to allocate memory for EFI data\n");
+		return NULL;
+	}
+
+	rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+	if (get_efi())
+		status = get_efi()->get_variable(isci_efivar_name,
+						 &ISCI_EFI_VENDOR_GUID,
+						 &efi_attrib,
+						 &data_len,
+						 efi_data);
+	else
+		status = EFI_NOT_FOUND;
+
+	if (status != EFI_SUCCESS) {
+		dev_warn(&pdev->dev,
+			 "Unable to obtain EFI var data for OEM parms\n");
+		return NULL;
+	}
+
+	oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+	if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+		dev_warn(&pdev->dev,
+			 "Invalid OEM header signature\n");
+		return NULL;
+	}
+
+	/* verify the checksum; the header and table bytes must sum to zero */
+	tmp = (u8 *)efi_data;
+	for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+		sum += *tmp;
+
+	if (sum != 0) {
+		dev_warn(&pdev->dev,
+			 "OEM table checksum failed\n");
+		return NULL;
+	}
+
+	if (memcmp(rom->hdr.signature,
+		   ISCI_ROM_SIG,
+		   ISCI_ROM_SIG_SIZE) != 0) {
+		dev_warn(&pdev->dev,
+			 "Invalid OEM table signature\n");
+		return NULL;
+	}
+
+	return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 0000000..e08b578
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,330 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED   0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+	struct sci_phy_user_params {
+		/**
+		 * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+		 * insertion frequency for this phy index.
+		 */
+		u32 notify_enable_spin_up_insertion_frequency;
+
+		/**
+		 * This field specifies the number of transmitted DWORDs within which
+		 * to transmit a single ALIGN primitive.  This value applies regardless
+		 * of what type of device is attached or connection state.  A value of
+		 * 0 indicates that no ALIGN primitives will be inserted.
+		 */
+		u16 align_insertion_frequency;
+
+		/**
+		 * This field specifies the number of transmitted DWORDs within which
+		 * to transmit 2 ALIGN primitives.  This applies for SAS connections
+		 * only.  A minimum value of 3 is required for this field.
+		 */
+		u16 in_connection_align_insertion_frequency;
+
+		/**
+		 * This field indicates the maximum speed generation to be utilized
+		 * by phys in the supplied port.
+		 * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+		 * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+		 * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+		 */
+		u8 max_speed_generation;
+
+	} phys[SCI_MAX_PHYS];
+
+	/**
+	 * This field specifies the maximum number of direct attached devices
+	 * that can have power supplied to them simultaneously.
+	 */
+	u8 max_concurr_spinup;
+
+	/**
+	 * This field specifies the number of seconds to allow a phy to consume
+	 * power before yielding to another phy.
+	 *
+	 */
+	u8 phy_spin_up_delay_interval;
+
+	/**
+	 * These timer values specify how long a link will remain open with no
+	 * activity, in increments of one microsecond (or of 100 microseconds
+	 * if the uppermost bit is set).
+	 */
+	u16 stp_inactivity_timeout;
+	u16 ssp_inactivity_timeout;
+
+	/**
+	 * These timer values specify how long a link will remain open, in
+	 * increments of 100 microseconds.
+	 */
+	u16 stp_max_occupancy_timeout;
+	u16 ssp_max_occupancy_timeout;
+
+	/**
+	 * This timer value specifies how long a link will remain open with no
+	 * outbound traffic in increments of a microsecond.
+	 *
+	 */
+	u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+	u8 sig[4];
+	u8 rev_major;
+	u8 rev_minor;
+	u16 len;
+	u8 checksum;
+	u8 reserved1;
+	u16 reserved2;
+} __attribute__ ((packed));
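+
+/*
+ * The checksum byte is chosen such that the 8-bit sum of the OEM header
+ * and the parameter table that follows it is zero; this is the property
+ * verified by isci_request_oprom() and isci_get_efi_var().
+ */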
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME		"isci/isci_firmware.bin"
+
+#define ROMSIGNATURE		0xaa55
+
+#define ISCI_OEM_SIG		"$OEM"
+#define ISCI_OEM_SIG_SIZE	4
+#define ISCI_ROM_SIG		"ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE	8
+
+#define ISCI_EFI_VENDOR_GUID	\
+	EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+			0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME	"RstScuO"
+
+#define ISCI_ROM_VER_1_0	0x10
+#define ISCI_ROM_VER_1_1	0x11
+#define ISCI_ROM_VER_1_3	0x13
+#define ISCI_ROM_VER_LATEST	ISCI_ROM_VER_1_3
+
+/* Allowed PORT configuration modes.
+ *
+ * APC: Automatic PORT configuration mode is selected when the OEM
+ * configuration parameters provide no PHY_MASK for any PORT, i.e. there are
+ * no phys assigned to any of the ports at start.
+ *
+ * MPC: Manual PORT configuration mode is selected when the OEM configuration
+ * parameters provide a PHY_MASK value for any PORT.  Any PORT with no
+ * PHY_MASK is considered an invalid port; not all PHYs must be assigned.  A
+ * PHY_MASK that assigns just a single PHY to a port, with no other PHYs
+ * assigned, is sufficient to declare manual PORT configuration (see the
+ * illustrative settings after the enum below).
+ */
+enum sci_port_configuration_mode {
+	SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+	SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
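+
+/*
+ * Illustrative OEM parameter settings (values are hypothetical): a manual
+ * configuration of two x2 ports could set ports[0].phy_mask = 0x3
+ * (phys 0-1) and ports[2].phy_mask = 0xC (phys 2-3), leaving ports[1] and
+ * ports[3] zero, while automatic configuration leaves ports[i].phy_mask = 0
+ * for all ports.
+ */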
+
+struct sci_bios_oem_param_block_hdr {
+	uint8_t signature[ISCI_ROM_SIG_SIZE];
+	uint16_t total_block_length;
+	uint8_t hdr_length;
+	uint8_t version;
+	uint8_t preboot_source;
+	uint8_t num_elements;
+	uint16_t element_length;
+	uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+	struct {
+		uint8_t mode_type;
+		uint8_t max_concurr_spin_up;
+		/*
+		 * This bitfield indicates the OEM's desired default Tx
+		 * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+		 * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+		 */
+		union {
+			struct {
+			/*
+			 * NOTE: Max spread for SATA is +0 / -5000 PPM.
+			 * Down-spreading SSC (only method allowed for SATA):
+			 *  SATA SSC Tx Disabled                    = 0x0
+			 *  SATA SSC Tx at +0 / -1419 PPM Spread    = 0x2
+			 *  SATA SSC Tx at +0 / -2129 PPM Spread    = 0x3
+			 *  SATA SSC Tx at +0 / -4257 PPM Spread    = 0x6
+			 *  SATA SSC Tx at +0 / -4967 PPM Spread    = 0x7
+			 */
+				uint8_t ssc_sata_tx_spread_level:4;
+			/*
+			 * SAS SSC Tx Disabled                     = 0x0
+			 *
+			 * NOTE: Max spread for SAS down-spreading +0 /
+			 *	 -2300 PPM
+			 * Down-spreading SSC:
+			 *  SAS SSC Tx at +0 / -1419 PPM Spread     = 0x2
+			 *  SAS SSC Tx at +0 / -2129 PPM Spread     = 0x3
+			 *
+			 * NOTE: Max spread for SAS center-spreading +2300 /
+			 *	 -2300 PPM
+			 * Center-spreading SSC:
+			 *  SAS SSC Tx at +1064 / -1064 PPM Spread  = 0x3
+			 *  SAS SSC Tx at +2129 / -2129 PPM Spread  = 0x6
+			 */
+				uint8_t ssc_sas_tx_spread_level:3;
+			/*
+			 * NOTE: Refer to the SSC section of the SAS 2.x
+			 * Specification for proper setting of this field.
+			 * For standard SAS Initiator SAS PHY operation it
+			 * should be 0 for Down-spreading.
+			 * SAS SSC Tx spread type:
+			 *  Down-spreading SSC      = 0
+			 *  Center-spreading SSC    = 1
+			 */
+				uint8_t ssc_sas_tx_type:1;
+			};
+			uint8_t do_enable_ssc;
+		};
+		/*
+		 * This field indicates the length of the SAS/SATA cable between
+		 * host and device.
+		 * It is used to relate the analog parameters of the phy in the
+		 * silicon to the length of the cable.
+		 * Supported cable attenuation levels:
+		 * "short" - up to 3m, "medium" - 3m to 6m, and "long" - more
+		 * than 6m.
+		 *
+		 * This is a bit mask field:
+		 *
+		 * BIT:      (MSB) 7     6     5     4
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Medium cable
+		 *                                           length assignment
+		 * BIT:            3     2     1     0  (LSB)
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Long cable length
+		 *                                           assignment
+		 *
+		 * BITS 7-4 are set when the cable length is assigned to medium
+		 * BITS 3-0 are set when the cable length is assigned to long
+		 *
+		 * The BIT positions are clear when the cable length is
+		 * assigned to short.
+		 *
+		 * Setting the bits for both long and medium cable length is
+		 * undefined.
+		 *
+		 * A value of 0x84 would assign
+		 *    phy3 - medium
+		 *    phy2 - long
+		 *    phy1 - short
+		 *    phy0 - short
+		 */
+		uint8_t cable_selection_mask;
+	} controller;
+
+	struct {
+		uint8_t phy_mask;
+	} ports[SCI_MAX_PORTS];
+
+	struct sci_phy_oem_params {
+		struct {
+			uint32_t high;
+			uint32_t low;
+		} sas_address;
+
+		uint32_t afe_tx_amp_control0;
+		uint32_t afe_tx_amp_control1;
+		uint32_t afe_tx_amp_control2;
+		uint32_t afe_tx_amp_control3;
+	} phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+	struct sci_bios_oem_param_block_hdr hdr;
+	struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 0000000..97f3ceb
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1863 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/*
+ * This file contains the constants and structures for the SCU memory
+ * mapped registers.
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK         (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT        (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK   (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT  (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK   (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT  (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK      (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT     (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK     (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT    (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID   (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT      (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT       (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP  (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR	    \
+	(\
+		SCU_VIIT_IPPT_SSP_INITIATOR  \
+		| SCU_VIIT_IPPT_SMP_INITIATOR  \
+		| SCU_VIIT_IPPT_STP_INITIATOR  \
+	)
+
+#define SCU_VIIT_STATUS_RNC_VALID      (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID  (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID      (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID      \
+	(\
+		SCU_VIIT_STATUS_RNC_VALID	\
+		| SCU_VIIT_STATUS_ADDRESS_VALID	  \
+		| SCU_VIIT_STATUS_RNI_VALID	  \
+	)
+
+#define SCU_VIIT_IPPT_SMP_TARGET    (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+	/**
+	 * This must be encoded according to the type of initiator that is
+	 * being constructed for this port.
+	 */
+	u32 status;
+
+	/**
+	 * Virtual initiator high SAS Address
+	 */
+	u32 initiator_sas_address_hi;
+
+	/**
+	 * Virtual initiator low SAS Address
+	 */
+	u32 initiator_sas_address_lo;
+
+	/**
+	 * This must be 0
+	 */
+	u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK                (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT               (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK     (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT    (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK               (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT              (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK            (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT           (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK  (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK  (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID   (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT      (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT       (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP  (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ *    virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+	u32 status;
+	u32 remote_initiator_sas_address_hi;
+	u32 remote_initiator_sas_address_lo;
+	u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+	(((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+	SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+	((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+	((reg_value) & ~(SCU_GEN_BIT(name)))
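+
+/*
+ * For example, SCU_GEN_VALUE(SCU_VIIT_ENTRY_FUNCTION, 0x12) evaluates to
+ * (0x12 << 20) & 0x0FF00000 == 0x01200000, and
+ * SCU_GEN_BIT(SCU_IIT_ENTRY_STATUS_UPDATE) evaluates to 0x20000000.
+ */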
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT         (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK          (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT    (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK     (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT       (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK        (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT       (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK        (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK               (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
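+
+/*
+ * For example, SMU_PCP_GEN_VAL(LOGICAL_PORT_INDEX, 2) evaluates to
+ * (2 << 12) & 0x0000F000 == 0x00002000.
+ */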
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT       (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK        (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT    (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK     (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT      (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK       (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK          (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR   SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION    SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT         (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK          (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT      (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK       (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT        (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK         (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK            (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR   SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION    SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT    (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK     (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT   (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK    (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK  (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT      (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK       (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT     (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK      (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT     (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK      (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK    (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+	SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK          (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT           (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK            (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT     (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK      (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK          (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+	SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+	SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value)	\
+	SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT  (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK   (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT  (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK   (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK      (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+	SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+	SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT    (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK     (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT    (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK     (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT   (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK    (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT   (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK    (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK   (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+	(\
+		((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+	)
+
+#define SMU_DCC_GET_MAX_LP(value) \
+	(\
+		((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+	)
+
+#define SMU_DCC_GET_MAX_TC(value) \
+	(\
+		((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+	)
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+	(\
+		((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+	)
+
+/* ***************************************************************************** */
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT    (0)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK     (0x00000001)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT    (1)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK     (0x00000002)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT   (2)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK    (0x00000004)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT  (3)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK   (0x00000008)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT   (16)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK    (0x000F0000)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT     (31)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK      (0x80000000)
+#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK        (0x7FF0FFF0)
+
+#define SMU_CGUCR_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
+
+#define SMU_CGUCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT      (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK       (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT    (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK     (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT     (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK      (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT   (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK    (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK                        (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED	\
+	(SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED	\
+	(SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+	(\
+		SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+		| SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+	)
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT  (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK   (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT  (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK   (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT  (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK   (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT  (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK   (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT  (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK   (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT  (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK   (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT  (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK   (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT  (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK   (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+	((1 << (pe)) << ((peg) * 8))
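+
+/*
+ * For example, SMU_RESET_PROTOCOL_ENGINE(1, 2) evaluates to
+ * (1 << 2) << 8 == 0x00000400, i.e.
+ * SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK.
+ */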
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+	(\
+		SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+		| SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+		| SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+		| SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+	)
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+	(\
+		SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+		| SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+	)
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT  (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK   (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT  (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK   (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT  (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK   (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT  (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK   (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+	((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
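+
+/*
+ * For example, SMU_RESET_WIDE_PORT_QUEUE(0, 2) evaluates to
+ * (1 << 1) << 0 << 16 == 0x00020000 (PEG0_LP2), and
+ * SMU_RESET_WIDE_PORT_QUEUE(1, 0) evaluates to 0x00040000 (PEG1_LP0).
+ */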
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT      (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK       (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT      (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK       (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT       (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK        (0x00400000)
+
+/*
+ * If you are going to reset the protocol engine group it makes sense to
+ * also reset all of its protocol engines and wide port queues. */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+	(\
+		(1 << ((peg) + 20)) \
+		| SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+		| SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+		| SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+	)
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+	(\
+		SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+		| SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+	)
+
+#define SMU_RESET_SCU()  (0xFFFFFFFF)
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT              (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK               (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT                (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK                 (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT    (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK     (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK               (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT   (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK    (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK      (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+	SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK      (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name)	\
+	SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT   (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK    (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK      (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name)	\
+	SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+	SCU_UFQGP_GEN_VAL(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+	SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value)	\
+	(SCU_UFQGP_GEN_BIT(ENABLE) | (value))
+
+#define SCU_UFQGP_DISABLE(value) \
+	(~SCU_UFQGP_GEN_BIT(ENABLE) & (value))
+
+#define SCU_UFQGP_VALUE(bit, value) \
+	(SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
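+/*
+ * Illustrative sketch (not part of the original driver): advancing the
+ * unsolicited frame get pointer while keeping the queue enabled, given a
+ * hypothetical "sdma" pointer to struct scu_sdma_registers (defined below):
+ *
+ *	u32 get = SCU_UFQGP_VALUE(cycle_bit, frame_index);
+ *	writel(SCU_UFQGP_ENABLE(get),
+ *	       &sdma->unsolicited_frame_get_pointer);
+ */
+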
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT                               (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK                                (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT                            (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK                             (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT                   (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK                    (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT               (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK                (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT     (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK      (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT        (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK         (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT                        (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK                         (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK                                        (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+	SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+	SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT             (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK              (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT           (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK            (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT   (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK    (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT  (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK   (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK             (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK              (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK             (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+	SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT            (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK             (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT  (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK   (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT     (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK      (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK                       (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT       (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK        (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT       (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK        (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT            (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK             (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT            (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK             (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT            (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK             (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT          (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK           (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT         (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK          (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT         (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK          (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT         (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK          (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT           (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK            (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT    (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK     (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT           (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK            (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK               (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT      (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK       (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT   (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK    (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT  (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK   (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT                       (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK                        (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK                  (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+	SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT                     (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK                      (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT                          (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK                           (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT                   (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK                    (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT                 (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK                  (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT            (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK             (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT             (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK              (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT                    (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK                     (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT      (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK       (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT                          (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK                           (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT                          (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK                           (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT        (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK         (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT              (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK               (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT    (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK     (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT                           (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK                            (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT                    (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK                     (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT                   (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK                    (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK                             (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK                              (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK                             (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT      (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK       (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT    (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK     (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT    (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK     (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT   (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK    (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK  (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT     (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK      (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT       (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK        (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT     (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK      (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT      (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK       (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT     (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK      (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT      (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK       (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT     (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK      (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT      (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK       (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT   (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK    (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK        (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK       (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK       (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT  (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK   (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT         (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK          (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK                          (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value)	\
+	SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT        (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK         (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT      (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK       (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT        (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK         (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT      (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK       (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK  (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT      (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK       (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK  (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT      (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK       (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK  (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT      (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK       (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK  (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT      (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK       (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK             (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+	SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT     (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK      (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT         (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK          (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT          (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK           (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT           (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK            (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK               (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK              (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK              (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+	SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT          (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK           (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK  (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+	SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT  (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK   (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK          (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+	SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT  (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK   (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT   (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK    (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK  (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT             (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK              (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT    (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK     (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT             (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK              (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK                   (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ *       BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (the Physical Function), and each
+ * entry is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
+ *    TCA0 for FN#0 is at BAR0 + 0x0400
+ *    TCA1 for FN#1 is at BAR0 + 0x0404
+ *    etc.
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET          0x0000
+#define SCU_SMU_AMR_OFFSET          0x0004
+#define SCU_SMU_ISR_OFFSET          0x0010
+#define SCU_SMU_IMR_OFFSET          0x0014
+#define SCU_SMU_ICC_OFFSET          0x0018
+#define SCU_SMU_HTTLBAR_OFFSET      0x0020
+#define SCU_SMU_HTTUBAR_OFFSET      0x0024
+#define SCU_SMU_TCR_OFFSET          0x0028
+#define SCU_SMU_CQLBAR_OFFSET       0x0030
+#define SCU_SMU_CQUBAR_OFFSET       0x0034
+#define SCU_SMU_CQPR_OFFSET         0x0040
+#define SCU_SMU_CQGR_OFFSET         0x0044
+#define SCU_SMU_CQC_OFFSET          0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET      0x0080
+#define SCU_SMU_RNCUBAR_OFFSET      0x0084
+#define SCU_SMU_DCC_OFFSET          0x0090
+#define SCU_SMU_DFC_OFFSET          0x0094
+#define SCU_SMU_SMUCSR_OFFSET       0x0098
+#define SCU_SMU_SCUSRCR_OFFSET      0x009C
+#define SCU_SMU_SMAW_OFFSET         0x00A0
+#define SCU_SMU_SMDW_OFFSET         0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET          0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET     0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET     0x2004
+#define SCU_SMU_MT_MDR0_OFFSET      0x2008
+#define SCU_SMU_MT_VCR0_OFFSET      0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET     0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET     0x2014
+#define SCU_SMU_MT_MDR1_OFFSET      0x2018
+#define SCU_SMU_MT_VCR1_OFFSET      0x201C
+#define SCU_SMU_MPBA_OFFSET         0x3000
+
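+/*
+ * Example (illustrative only): the TCA entry for FN#2 sits at
+ * BAR0 + SCU_SMU_TCA_OFFSET + (2 * 0x04) = BAR0 + 0x0408.  Likewise, with a
+ * 4 KiB system page, the per-function formula above would place FN#1's
+ * register window at BAR0 + 1 * 4096 * 2 = BAR0 + 0x2000.
+ */
+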
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+	u32 post_context_port;
+/* 0x0004 AMR */
+	u32 address_modifier;
+	u32 reserved_08;
+	u32 reserved_0C;
+/* 0x0010 ISR */
+	u32 interrupt_status;
+/* 0x0014 IMR */
+	u32 interrupt_mask;
+/* 0x0018 ICC */
+	u32 interrupt_coalesce_control;
+	u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+	u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+	u32 host_task_table_upper;
+/* 0x0028 TCR */
+	u32 task_context_range;
+	u32 reserved_2C;
+/* 0x0030 CQLBAR */
+	u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+	u32 completion_queue_upper;
+	u32 reserved_38;
+	u32 reserved_3C;
+/* 0x0040 CQPR */
+	u32 completion_queue_put;
+/* 0x0044 CQGR */
+	u32 completion_queue_get;
+/* 0x0048 CQC */
+	u32 completion_queue_control;
+	u32 reserved_4C;
+	u32 reserved_5x[4];
+	u32 reserved_6x[4];
+	u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+	u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+	u32 remote_node_context_upper;
+	u32 reserved_88;
+	u32 reserved_8C;
+/* 0x0090 DCC */
+	u32 device_context_capacity;
+/* 0x0094 DFC */
+	u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+	u32 control_status;
+/* 0x009C SCUSRCR */
+	u32 soft_reset_control;
+/* 0x00A0 SMAW */
+	u32 mmr_address_window;
+/* 0x00A4 SMDW */
+	u32 mmr_data_window;
+/* 0x00A8 CGUCR */
+	u32 clock_gating_control;
+/* 0x00AC CGUPC */
+	u32 clock_gating_performance;
+/* A whole bunch of reserved space */
+	u32 reserved_Bx[4];
+	u32 reserved_Cx[4];
+	u32 reserved_Dx[4];
+	u32 reserved_Ex[4];
+	u32 reserved_Fx[4];
+	u32 reserved_1xx[64];
+	u32 reserved_2xx[64];
+	u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+	u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
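+/*
+ * Hypothetical compile-time layout check (not part of the original driver):
+ * a sketch of how the structure above could be validated against the offset
+ * defines, assuming BUILD_BUG_ON() and offsetof() are available here.
+ */
+static inline void smu_registers_check_layout(void)
+{
+	BUILD_BUG_ON(offsetof(struct smu_registers, interrupt_status) !=
+		     SCU_SMU_ISR_OFFSET);
+	BUILD_BUG_ON(offsetof(struct smu_registers, soft_reset_control) !=
+		     SCU_SMU_SCUSRCR_OFFSET);
+	BUILD_BUG_ON(offsetof(struct smu_registers, task_context_assignment) !=
+		     SCU_SMU_TCA_OFFSET);
+}
+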
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE               0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET   0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET   0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET     0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET     0x000C
+#define SCU_SDMA_UFQC_OFFSET        0x0010
+#define SCU_SDMA_UFQPP_OFFSET       0x0014
+#define SCU_SDMA_UFQGP_OFFSET       0x0018
+#define SCU_SDMA_PDMACR_OFFSET      0x001C
+#define SCU_SDMA_CDMACR_OFFSET      0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+	u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+	u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+	u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+	u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+	u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+	u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+	u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+	u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+	u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+	u32 cdma_configuration;
+/* Remainder SDMA register space */
+	u32 reserved_0084_0400[0xDF];
+
+};
+
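+/*
+ * Example (illustrative only): in struct scu_sdma_registers below, the
+ * reserved_0020_007C[0x18] array is 24 dwords (0x60 bytes), so
+ * cdma_configuration lands at offset 0x20 + 0x60 = 0x80, matching
+ * SCU_SDMA_CDMACR_OFFSET.
+ */
+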
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET    0x0000
+#define SCU_PEG1_OFFSET    0x8000
+
+#define SCU_TL0_OFFSET     0x0000
+#define SCU_TL1_OFFSET     0x0400
+#define SCU_TL2_OFFSET     0x0800
+#define SCU_TL3_OFFSET     0x0C00
+
+#define SCU_LL_OFFSET      0x0080
+#define SCU_LL0_OFFSET     (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET     (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET     (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET     (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
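+/*
+ * Example (illustrative only): the link layer registers for protocol
+ * engine 2 of PEG 1 start at
+ * SCU_PEG1_OFFSET + SCU_TL2_OFFSET + SCU_LL_OFFSET
+ * = 0x8000 + 0x0800 + 0x0080 = 0x8880.
+ */
+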
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET         0x0000
+#define SCU_TLADTR_OFFSET       0x0004
+#define SCU_TLTTMR_OFFSET       0x0008
+#define SCU_TLEECR0_OFFSET      0x000C
+#define SCU_STPTLDARNI_OFFSET   0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT    (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK     (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK  (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT     (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK      (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT         (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK          (0x00000010)
+#define SCU_TLCR_RESERVED_MASK                     (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ *    registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+	/* 0x0000 TLCR */
+	u32 control;
+	/* 0x0004 TLADTR */
+	u32 arbitration_delay_timer;
+	/* 0x0008 TLTTMR */
+	u32 timer_test_mode;
+	/* 0x000C reserved */
+	u32 reserved_0C;
+	/* 0x0010 STPTLDARNI */
+	u32 stp_rni;
+	/* 0x0014 TLFEWPORCTRL */
+	u32 tlfe_wpo_read_control;
+	/* 0x0018 TLFEWPORDATA */
+	u32 tlfe_wpo_read_data;
+	/* 0x001C RXTLSSCSR1 */
+	u32 rxtl_single_step_control_status_1;
+	/* 0x0020 RXTLSSCSR2 */
+	u32 rxtl_single_step_control_status_2;
+	/* 0x0024 AWTRDDCR */
+	u32 tlfe_awt_retry_delay_debug_control;
+	/* Remainder of TL memory space */
+	u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET        0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET       0x0000
+#define SCU_SAS_LLSTA_OFFSET        0x0004
+#define SCU_SATA_PSELTOV_OFFSET     0x0008
+#define SCU_SAS_TIMETOV_OFFSET      0x0010
+#define SCU_SAS_LOSTOT_OFFSET       0x0014
+#define SCU_SAS_LNKTOV_OFFSET       0x0018
+#define SCU_SAS_PHYTOV_OFFSET       0x001C
+#define SCU_SAS_AFERCNT_OFFSET      0x0020
+#define SCU_SAS_WERCNT_OFFSET       0x0024
+#define SCU_SAS_TIID_OFFSET         0x0028
+#define SCU_SAS_TIDNH_OFFSET        0x002C
+#define SCU_SAS_TIDNL_OFFSET        0x0030
+#define SCU_SAS_TISSAH_OFFSET       0x0034
+#define SCU_SAS_TISSAL_OFFSET       0x0038
+#define SCU_SAS_TIPID_OFFSET        0x003C
+#define SCU_SAS_TIRES2_OFFSET       0x0040
+#define SCU_SAS_ADRSTA_OFFSET       0x0044
+#define SCU_SAS_MAWTTOV_OFFSET      0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET     0x0054
+#define SCU_SAS_RFCNT_OFFSET        0x0060
+#define SCU_SAS_TFCNT_OFFSET        0x0064
+#define SCU_SAS_RFDCNT_OFFSET       0x0068
+#define SCU_SAS_TFDCNT_OFFSET       0x006C
+#define SCU_SAS_LERCNT_OFFSET       0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET   0x0074
+#define SCU_SAS_CRERCNT_OFFSET      0x0078
+#define SCU_STPCTL_OFFSET           0x007C
+#define SCU_SAS_PCFG_OFFSET         0x0080
+#define SCU_SAS_CLKSM_OFFSET        0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET    0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET    0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET     0x0090
+#define SCU_SAS_COMINIT_OFFSET      0x0094
+#define SCU_SAS_COMWAKE_OFFSET      0x0098
+#define SCU_SAS_COMSAS_OFFSET       0x009C
+#define SCU_SAS_SFERCNT_OFFSET      0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET     0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET     0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET    0x00AC
+#define SCU_SAS_CNTCTL_OFFSET       0x00B0
+#define SCU_SAS_SSPTOV_OFFSET       0x00B4
+#define SCU_FTCTL_OFFSET            0x00B8
+#define SCU_FRCTL_OFFSET            0x00BC
+#define SCU_FTWMRK_OFFSET           0x00C0
+#define SCU_ENSPINUP_OFFSET         0x00C4
+#define SCU_SAS_TRNTOV_OFFSET       0x00C8
+#define SCU_SAS_PHYCAP_OFFSET       0x00CC
+#define SCU_SAS_PHYCTL_OFFSET       0x00D0
+#define SCU_SAS_LLCTL_OFFSET        0x00D8
+#define SCU_AFE_XCVRCR_OFFSET       0x00DC
+#define SCU_AFE_LUTCR_OFFSET        0x00E0
+
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT          (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK           (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT                 (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK                  (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT         (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK          (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT              (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK               (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
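+/*
+ * Illustrative sketch (not part of the original driver; the field values
+ * are arbitrary), where "llr" would be a pointer to
+ * struct scu_link_layer_registers (defined below):
+ *
+ *	u32 phytov = SCU_SAS_PHYTOV_GEN_VAL(ALIGN_DETECTION, 0x36) |
+ *		     SCU_SAS_PHYTOV_GEN_VAL(HOT_PLUG, 0x0A) |
+ *		     SCU_SAS_PHYTOV_GEN_VAL(COMSAS_DETECTION, 0x3A) |
+ *		     SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x08);
+ *	writel(phytov, &llr->sas_phy_timeouts);
+ */
+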
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2                   (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3                   (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT            (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK             (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT   (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK    (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK  (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT       (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK        (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED                             (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+	SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
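+/*
+ * Illustrative sketch (not part of the original driver): capping the link
+ * rate at gen2 and setting a 20-unit no-outbound-task timeout:
+ *
+ *	u32 llctl = SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE,
+ *			SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2) |
+ *		    SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, 20);
+ *	writel(llctl, &llr->link_layer_control);
+ */
+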
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT                     (0xF0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED                    (0x1FF)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT                       (0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK                        (0x3FF)
+
+#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
+
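+/*
+ * Example (illustrative only): the default COMSAS negation time would be
+ * programmed as SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME,
+ * SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT).
+ */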
+
+/* #define SCU_FRXHECR_DCNT_OFFSET      0x00B0 */
+#define SCU_PSZGCR_OFFSET           0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET    0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET         0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET         0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+	u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+	u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+	u32 port_selector_timeout;
+	u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+	u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+	u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+	u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+	u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+	u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+	u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+	u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+	u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+	u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+	u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+	u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+	u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+	u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+	u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+	u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+	u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+	u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+	u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+	u32 link_layer_hang_detection_timeout;
+	u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+	u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+	u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+	u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+	u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+	u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+	u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+	u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+	u32 stp_control;
+/* 0x0080 SAS_PCFG */
+	u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+	u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+	u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+	u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+	u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+	u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+	u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+	u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+	u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+	u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+	u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+	u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+	u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+	u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+	u32 ftx_control;
+/* 0x00BC FRCTL */
+	u32 frx_control;
+/* 0x00C0 FTWMRK */
+	u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+	u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+	u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+	u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+	u32 phy_control;
+	u32 reserved_d4;
+/* 0x00D8 LLCTL */
+	u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+	u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+	u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+	u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+	u32 receive_phycap;
+	u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+	u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+	u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+	u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+	u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+	u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+	u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+	u32 sas_primitive_timeout;
+	u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+	u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+	u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+	u32 pla_control;
+/* Remainder of memory space 896 bytes */
+	u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 SAS_PTxC (same offset as SAS_TCTSTM)
+ *   u32   primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET         0x1400
+
+/* #define SCU_SGPIO_OFFSET         0x6000   // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET   0x0000
+#define SCU_SGPIO_SGPBR_OFFSET   0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET  0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET  0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET  0x0018
+/* Address from 0x0820 to 0x083C */
+#define SCU_SGPIO_SGODSR_OFFSET  0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+	u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+	u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+	u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+	u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+	u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+	u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+	u32 vendor_specific_code;
+/* 0x001C Reserved */
+	u32 reserved_001c;
+/* 0x0020 SGPIO_SGODSR */
+	u32 output_data_select[8];
+/* Remainder of memory space 256 bytes */
+	u32 reserved_1444_14ff[0x30];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define     SCU_VIIT_BASE     0x1c00
+
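+/*
+ * Example (illustrative only): VIIT entry 5 is at
+ * SCU_VIIT_BASE + 5 * 0x10 = 0x1c50.
+ */
+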
+struct scu_viit_registers {
+	u32 registers[256];
+};
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE               0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET      0x0000
+#define SCU_PTSG_RTCR_OFFSET        0x0004
+#define SCU_PTSG_RTCCR_OFFSET       0x0008
+#define SCU_PTSG_PTS0CR_OFFSET      0x0010
+#define SCU_PTSG_PTS0SR_OFFSET      0x0014
+#define SCU_PTSG_PTS1CR_OFFSET      0x0018
+#define SCU_PTSG_PTS1SR_OFFSET      0x001C
+#define SCU_PTSG_PTS2CR_OFFSET      0x0020
+#define SCU_PTSG_PTS2SR_OFFSET      0x0024
+#define SCU_PTSG_PTS3CR_OFFSET      0x0028
+#define SCU_PTSG_PTS3SR_OFFSET      0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET    0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET    0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET    0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET    0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET    0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET   0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/status
+ *    pairs for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+	u32 control;
+	u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the Port Task
+ *    Scheduler group registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+	u32 control;
+/* 0x0004 RTCR */
+	u32 real_time_clock;
+/* 0x0008 RTCCR */
+	u32 real_time_clock_control;
+/* 0x000C */
+	u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+	struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+	u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+	u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+	u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+	u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET        0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE                  0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+	/* 0x0000 AFE_XCVR_CTRL0 */
+	u32 afe_xcvr_control0;
+	/* 0x0004 AFE_XCVR_CTRL1 */
+	u32 afe_xcvr_control1;
+	/* 0x0008 */
+	u32 reserved_0008;
+	/* 0x000c afe_dfx_rx_control0 */
+	u32 afe_dfx_rx_control0;
+	/* 0x0010 AFE_DFX_RX_CTRL1 */
+	u32 afe_dfx_rx_control1;
+	/* 0x0014 */
+	u32 reserved_0014;
+	/* 0x0018 AFE_DFX_RX_STS0 */
+	u32 afe_dfx_rx_status0;
+	/* 0x001c AFE_DFX_RX_STS1 */
+	u32 afe_dfx_rx_status1;
+	/* 0x0020 */
+	u32 reserved_0020;
+	/* 0x0024 AFE_TX_CTRL */
+	u32 afe_tx_control;
+	/* 0x0028 AFE_TX_AMP_CTRL0 */
+	u32 afe_tx_amp_control0;
+	/* 0x002c AFE_TX_AMP_CTRL1 */
+	u32 afe_tx_amp_control1;
+	/* 0x0030 AFE_TX_AMP_CTRL2 */
+	u32 afe_tx_amp_control2;
+	/* 0x0034 AFE_TX_AMP_CTRL3 */
+	u32 afe_tx_amp_control3;
+	/* 0x0038 afe_tx_ssc_control */
+	u32 afe_tx_ssc_control;
+	/* 0x003c */
+	u32 reserved_003c;
+	/* 0x0040 AFE_RX_SSC_CTRL0 */
+	u32 afe_rx_ssc_control0;
+	/* 0x0044 AFE_RX_SSC_CTRL1 */
+	u32 afe_rx_ssc_control1;
+	/* 0x0048 AFE_RX_SSC_CTRL2 */
+	u32 afe_rx_ssc_control2;
+	/* 0x004c AFE_RX_EQ_STS0 */
+	u32 afe_rx_eq_status0;
+	/* 0x0050 AFE_RX_EQ_STS1 */
+	u32 afe_rx_eq_status1;
+	/* 0x0054 AFE_RX_CDR_STS */
+	u32 afe_rx_cdr_status;
+	/* 0x0058 */
+	u32 reserved_0058;
+	/* 0x005c AFE_CHAN_CTRL */
+	u32 afe_channel_control;
+	/* 0x0060-0x006c */
+	u32 reserved_0060_006c[0x04];
+	/* 0x0070 AFE_XCVR_EC_STS0 */
+	u32 afe_xcvr_error_capture_status0;
+	/* 0x0074 AFE_XCVR_EC_STS1 */
+	u32 afe_xcvr_error_capture_status1;
+	/* 0x0078 AFE_XCVR_EC_STS2 */
+	u32 afe_xcvr_error_capture_status2;
+	/* 0x007c afe_xcvr_ec_status3 */
+	u32 afe_xcvr_error_capture_status3;
+	/* 0x0080 AFE_XCVR_EC_STS4 */
+	u32 afe_xcvr_error_capture_status4;
+	/* 0x0084 AFE_XCVR_EC_STS5 */
+	u32 afe_xcvr_error_capture_status5;
+	/* 0x0088-0x00fc */
+	u32 reserved_008c_00fc[0x1e];
+};
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+	/* 0xE000 AFE_BIAS_CTRL */
+	u32 afe_bias_control;
+	u32 reserved_0004;
+	/* 0x0008 AFE_PLL_CTRL0 */
+	u32 afe_pll_control0;
+	/* 0x000c AFE_PLL_CTRL1 */
+	u32 afe_pll_control1;
+	/* 0x0010 AFE_PLL_CTRL2 */
+	u32 afe_pll_control2;
+	/* 0x0014 AFE_CB_STS */
+	u32 afe_common_block_status;
+	/* 0x0018-0x007c */
+	u32 reserved_18_7c[0x1a];
+	/* 0x0080 AFE_PMSN_MCTRL0 */
+	u32 afe_pmsn_master_control0;
+	/* 0x0084 AFE_PMSN_MCTRL1 */
+	u32 afe_pmsn_master_control1;
+	/* 0x0088 AFE_PMSN_MCTRL2 */
+	u32 afe_pmsn_master_control2;
+	/* 0x008C-0x00fc */
+	u32 reserved_008c_00fc[0x1D];
+	/* 0x0100 AFE_DFX_MST_CTRL0 */
+	u32 afe_dfx_master_control0;
+	/* 0x0104 AFE_DFX_MST_CTRL1 */
+	u32 afe_dfx_master_control1;
+	/* 0x0108 AFE_DFX_DCL_CTRL */
+	u32 afe_dfx_dcl_control;
+	/* 0x010c AFE_DFX_DMON_CTRL */
+	u32 afe_dfx_digital_monitor_control;
+	/* 0x0110 AFE_DFX_AMONP_CTRL */
+	u32 afe_dfx_analog_p_monitor_control;
+	/* 0x0114 AFE_DFX_AMONN_CTRL */
+	u32 afe_dfx_analog_n_monitor_control;
+	/* 0x0118 AFE_DFX_NTL_STS */
+	u32 afe_dfx_ntl_status;
+	/* 0x011c AFE_DFX_FIFO_STS0 */
+	u32 afe_dfx_fifo_status0;
+	/* 0x0120 AFE_DFX_FIFO_STS1 */
+	u32 afe_dfx_fifo_status1;
+	/* 0x0124 AFE_DFX_MPAT_CTRL */
+	u32 afe_dfx_master_pattern_control;
+	/* 0x0128 AFE_DFX_P0_CTRL */
+	u32 afe_dfx_p0_control;
+	/* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+	u32 afe_dfx_p0_data[32];
+	/* 0x01ac */
+	u32 reserved_01ac;
+	/* 0x01b0-0x020c AFE_DFX_P0_IRx */
+	u32 afe_dfx_p0_instruction[24];
+	/* 0x0210 */
+	u32 reserved_0210;
+	/* 0x0214 AFE_DFX_P1_CTRL */
+	u32 afe_dfx_p1_control;
+	/* 0x0218-0x0254 AFE_DFX_P1_DRx */
+	u32 afe_dfx_p1_data[16];
+	/* 0x0258-0x029c */
+	u32 reserved_0258_029c[0x12];
+	/* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+	u32 afe_dfx_p1_instruction[8];
+	/* 0x02c0-0x2fc */
+	u32 reserved_02c0_02fc[0x10];
+	/* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+	u32 afe_dfx_tx_pmsn_control;
+	/* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+	u32 afe_dfx_rx_pmsn_control;
+	u32 reserved_0308;
+	/* 0x030c AFE_DFX_NOA_CTRL0 */
+	u32 afe_dfx_noa_control0;
+	/* 0x0310 AFE_DFX_NOA_CTRL1 */
+	u32 afe_dfx_noa_control1;
+	/* 0x0314 AFE_DFX_NOA_CTRL2 */
+	u32 afe_dfx_noa_control2;
+	/* 0x0318 AFE_DFX_NOA_CTRL3 */
+	u32 afe_dfx_noa_control3;
+	/* 0x031c AFE_DFX_NOA_CTRL4 */
+	u32 afe_dfx_noa_control4;
+	/* 0x0320 AFE_DFX_NOA_CTRL5 */
+	u32 afe_dfx_noa_control5;
+	/* 0x0324 AFE_DFX_NOA_CTRL6 */
+	u32 afe_dfx_noa_control6;
+	/* 0x0328 AFE_DFX_NOA_CTRL7 */
+	u32 afe_dfx_noa_control7;
+	/* 0x032c-0x07fc */
+	u32 reserved_032c_07fc[0x135];
+
+	/* 0x0800-0x0bfc */
+	struct scu_afe_transceiver scu_afe_xcvr[4];
+
+	/* 0x0c00-0x0ffc */
+	u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+	u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+	u32 table[256];
+};
+
+/**
+ * Placeholder for the Zone Partition Table information.  Zoning will not
+ *    be included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+	u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM registers since I am not sure if we need to
+ *    read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_completion_ram {
+	u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not sure if we need to
+ *    read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+	u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS  256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+	u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+	u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+	u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+	u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU hardware pairs up the TL
+ *    registers with the LL registers, so we must place them adjacent to
+ *    form the array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+	struct scu_transport_layer_registers tl;
+	struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ *     These registers are unique to each protocol engine group.  There can be
+ *    at most two PEGs in a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+	struct transport_link_layer_pair pe[4];
+	struct scu_port_task_scheduler_group_registers ptsg;
+	struct scu_protocol_engine_group_registers peg;
+	struct scu_sgpio_registers sgpio;
+	u32 reserved_01500_1BFF[0x1C0];
+	struct scu_viit_entry viit[64];
+	struct scu_zone_partition_table zpt0;
+	struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers, including both PEG register sets if
+ *    that compile option is turned on. All of these registers are in the
+ *    memory mapped space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+	/* 0x0000 - PEG 0 */
+	struct scu_peg_registers peg0;
+
+	/* 0x6000 - SDMA and Miscellaneous */
+	struct scu_sdma_registers sdma;
+	struct scu_completion_ram cram;
+	struct scu_frame_buffer_ram fbram;
+	u32 reserved_6800_69FF[0x80];
+	struct noa_protocol_engine_partition noa_pe;
+	struct noa_hub_partition noa_hub;
+	struct noa_host_interface_partition noa_if;
+	u32 reserved_6d00_7fff[0x4c0];
+
+	/* 0x8000 - PEG 1 */
+	struct scu_peg_registers peg1;
+
+	/* 0xE000 - AFE Registers */
+	struct scu_afe_registers afe;
+
+	/* 0xF000 - reserved */
+	u32 reserved_f000_211fff[0x80c00];
+
+	/* 0x212000 - scratch RAM */
+	struct scu_scratch_ram scratch_ram;
+};
+
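+/*
+ * Illustrative check (not part of the original driver): the member sizes
+ * above sum up so that sdma starts at SCU_SDMA_BASE (0x6000), peg1 at
+ * SCU_PEG1_OFFSET (0x8000), afe at SCU_AFE_MMR_BASE (0xE000), and
+ * scratch_ram at 0x212000 (0xF000 + 0x80c00 reserved dwords, i.e.
+ * 0x203000 bytes).
+ */
+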
+#endif   /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 0000000..cc51f38
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1726 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include <linux/bitops.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
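+/*
+ * REMOTE_DEV_STATES (see remote_device.h) is an X-macro list whose entries
+ * are wrapped in C().  With C(a) defined as #a below, the list expands to
+ * an array of state-name strings indexed by enum sci_remote_device_states.
+ */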
+#undef C
+#define C(a) (#a)
+const char *dev_state_name(enum sci_remote_device_states state)
+{
+	static const char * const strings[] = REMOTE_DEV_STATES;
+
+	return strings[state];
+}
+#undef C
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+					  enum sci_remote_node_suspension_reasons reason)
+{
+	return sci_remote_node_context_suspend(&idev->rnc, reason,
+					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ *    remote device is ready. We mark the isci device as ready and signal the
+ *    waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+	dev_dbg(&ihost->pdev->dev,
+		"%s: idev = %p\n", __func__, idev);
+
+	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+	set_bit(IDEV_IO_READY, &idev->flags);
+	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+		wake_up(&ihost->eventq);
+}
+
+static enum sci_status sci_remote_device_terminate_req(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	int check_abort,
+	struct isci_request *ireq)
+{
+	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+	    (ireq->target_device != idev) ||
+	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
+		return SCI_SUCCESS;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
+		__func__, idev, idev->flags, ireq, ireq->target_device);
+
+	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+
+	return sci_controller_terminate_request(ihost, idev, ireq);
+}
+
+static enum sci_status sci_remote_device_terminate_reqs_checkabort(
+	struct isci_remote_device *idev,
+	int chk)
+{
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	enum sci_status status  = SCI_SUCCESS;
+	u32 i;
+
+	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+		struct isci_request *ireq = ihost->reqs[i];
+		enum sci_status s;
+
+		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
+		if (s != SCI_SUCCESS)
+			status = s;
+	}
+	return status;
+}
+
+static bool isci_compare_suspendcount(
+	struct isci_remote_device *idev,
+	u32 localcount)
+{
+	smp_rmb();
+
+	/* Check for a change in the suspend count, or the RNC
+	 * being destroyed.
+	 */
+	return (localcount != idev->rnc.suspend_count)
+	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+}
+
+static bool isci_check_reqterm(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq,
+	u32 localcount)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	res = isci_compare_suspendcount(idev, localcount)
+		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return res;
+}
+
+static bool isci_check_devempty(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	u32 localcount)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	res = isci_compare_suspendcount(idev, localcount)
+		&& idev->started_request_count == 0;
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return res;
+}
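+
+/* Note: wait_event_timeout() evaluates its condition in the waiting
+ * thread with no locks held, so the two predicates above take scic_lock
+ * themselves to snapshot the suspend count, request flags and
+ * started_request_count coherently.
+ */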
+
+enum sci_status isci_remote_device_terminate_requests(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq)
+{
+	enum sci_status status = SCI_SUCCESS;
+	unsigned long flags;
+	u32 rnc_suspend_count;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (isci_get_device(idev) == NULL) {
+		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
+			__func__, idev);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		status = SCI_FAILURE;
+	} else {
+		/* If already suspended, don't wait for another suspension. */
+		smp_rmb();
+		rnc_suspend_count
+			= sci_remote_node_context_is_suspended(&idev->rnc)
+				? 0 : idev->rnc.suspend_count;
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: idev=%p, ireq=%p; started_request_count=%d, "
+				"rnc_suspend_count=%d, rnc.suspend_count=%d"
+				"about to wait\n",
+			__func__, idev, ireq, idev->started_request_count,
+			rnc_suspend_count, idev->rnc.suspend_count);
+
+		#define MAX_SUSPEND_MSECS 10000
+		if (ireq) {
+			/* Terminate a specific TC. */
+			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+			if (!wait_event_timeout(ihost->eventq,
+						isci_check_reqterm(ihost, idev, ireq,
+								   rnc_suspend_count),
+						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
+					 __func__, ihost->id);
+				dev_dbg(&ihost->pdev->dev,
+					 "%s: ******* Timeout waiting for "
+					 "suspend; idev=%p, current state %s; "
+					 "started_request_count=%d, flags=%lx\n\t"
+					 "rnc_suspend_count=%d, rnc.suspend_count=%d "
+					 "RNC: current state %s, current "
+					 "suspend_type %x dest state %d;\n"
+					 "ireq=%p, ireq->flags = %lx\n",
+					 __func__, idev,
+					 dev_state_name(idev->sm.current_state_id),
+					 idev->started_request_count, idev->flags,
+					 rnc_suspend_count, idev->rnc.suspend_count,
+					 rnc_state_name(idev->rnc.sm.current_state_id),
+					 idev->rnc.suspend_type,
+					 idev->rnc.destination_state,
+					 ireq, ireq->flags);
+			}
+			spin_lock_irqsave(&ihost->scic_lock, flags);
+			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+				isci_free_tag(ihost, ireq->io_tag);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		} else {
+			/* Terminate all TCs. */
+			sci_remote_device_terminate_requests(idev);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+			if (!wait_event_timeout(ihost->eventq,
+						isci_check_devempty(ihost, idev,
+								    rnc_suspend_count),
+						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
+					 __func__, ihost->id);
+				dev_dbg(&ihost->pdev->dev,
+					"%s: ******* Timeout waiting for "
+					"suspend; idev=%p, current state %s; "
+					"started_request_count=%d, flags=%lx\n\t"
+					"rnc_suspend_count=%d, "
+					"RNC: current state %s, "
+					"rnc.suspend_count=%d, current "
+					"suspend_type %x dest state %d\n",
+					__func__, idev,
+					dev_state_name(idev->sm.current_state_id),
+					idev->started_request_count, idev->flags,
+					rnc_suspend_count,
+					rnc_state_name(idev->rnc.sm.current_state_id),
+					idev->rnc.suspend_count,
+					idev->rnc.suspend_type,
+					idev->rnc.destination_state);
+			}
+		}
+		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
+			__func__, idev);
+		isci_put_device(idev);
+	}
+	return status;
+}
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as not ready
+ *    (the "ready_for_io" flag is cleared).
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies the reason the device became not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+					 struct isci_remote_device *idev,
+					 u32 reason)
+{
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
+
+	switch (reason) {
+	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+		set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+		/* Suspend the remote device so the I/O can be terminated. */
+		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+		/* Kill all outstanding requests for the device. */
+		sci_remote_device_terminate_requests(idev);
+
+		/* Fall through into the default case... */
+	default:
+		clear_bit(IDEV_IO_READY, &idev->flags);
+		break;
+	}
+}
+
+/* Called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+
+	BUG_ON(idev->started_request_count != 0);
+	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+enum sci_status sci_remote_device_terminate_requests(
+	struct isci_remote_device *idev)
+{
+	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+					u32 timeout)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_DEV_STOPPED:
+		return SCI_SUCCESS;
+	case SCI_DEV_STARTING:
+		/* device not started so there had better be no requests */
+		BUG_ON(idev->started_request_count != 0);
+		sci_remote_node_context_destruct(&idev->rnc,
+						      rnc_destruct_done, idev);
+		/* Transition to the stopping state and wait for the
+		 * remote node to complete being posted and invalidated.
+		 */
+		sci_change_state(sm, SCI_DEV_STOPPING);
+		return SCI_SUCCESS;
+	case SCI_DEV_READY:
+	case SCI_STP_DEV_IDLE:
+	case SCI_STP_DEV_CMD:
+	case SCI_STP_DEV_NCQ:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_AWAIT_RESET:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_SMP_DEV_CMD:
+		sci_change_state(sm, SCI_DEV_STOPPING);
+		if (idev->started_request_count == 0)
+			sci_remote_node_context_destruct(&idev->rnc,
+							 rnc_destruct_done,
+							 idev);
+		else {
+			sci_remote_device_suspend(
+				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+			sci_remote_device_terminate_requests(idev);
+		}
+		return SCI_SUCCESS;
+	case SCI_DEV_STOPPING:
+		/* All requests should have been terminated, but if there is an
+		 * attempt to stop a device already in the stopping state, then
+		 * try again to terminate.
+		 */
+		return sci_remote_device_terminate_requests(idev);
+	case SCI_DEV_RESETTING:
+		sci_change_state(sm, SCI_DEV_STOPPING);
+		return SCI_SUCCESS;
+	}
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_STOPPED:
+	case SCI_DEV_STARTING:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_SMP_DEV_CMD:
+	case SCI_DEV_STOPPING:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_RESETTING:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_DEV_READY:
+	case SCI_STP_DEV_IDLE:
+	case SCI_STP_DEV_CMD:
+	case SCI_STP_DEV_NCQ:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_AWAIT_RESET:
+		sci_change_state(sm, SCI_DEV_RESETTING);
+		return SCI_SUCCESS;
+	}
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+
+	if (state != SCI_DEV_RESETTING) {
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(sm, SCI_DEV_READY);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+						     u32 frame_index)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	enum sci_status status;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_STOPPED:
+	case SCI_DEV_STARTING:
+	case SCI_STP_DEV_IDLE:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		/* Return the frame back to the controller */
+		sci_controller_release_frame(ihost, frame_index);
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_DEV_READY:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_AWAIT_RESET:
+	case SCI_DEV_STOPPING:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_RESETTING: {
+		struct isci_request *ireq;
+		struct ssp_frame_hdr hdr;
+		void *frame_header;
+		ssize_t word_cnt;
+
+		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								       frame_index,
+								       &frame_header);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		word_cnt = sizeof(hdr) / sizeof(u32);
+		sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+		if (ireq && ireq->target_device == idev) {
+			/* The IO request is now in charge of releasing the frame */
+			status = sci_io_request_frame_handler(ireq, frame_index);
+		} else {
+			/* We could not map this tag to a valid IO
+			 * request.  Just toss the frame and continue.
+			 */
+			sci_controller_release_frame(ihost, frame_index);
+		}
+		break;
+	}
+	case SCI_STP_DEV_NCQ: {
+		struct dev_to_host_fis *hdr;
+
+		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								       frame_index,
+								       (void **)&hdr);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		if (hdr->fis_type == FIS_SETDEVBITS &&
+		    (hdr->status & ATA_ERR)) {
+			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+			/* TODO Check sactive and complete associated IO if any. */
+			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+		} else if (hdr->fis_type == FIS_REGD2H &&
+			   (hdr->status & ATA_ERR)) {
+			/*
+			 * Some devices return a D2H FIS when an NCQ error is detected.
+			 * Treat it like an SDB error FIS: use the same not-ready reason.
+			 */
+			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+		} else
+			status = SCI_FAILURE;
+
+		sci_controller_release_frame(ihost, frame_index);
+		break;
+	}
+	case SCI_STP_DEV_CMD:
+	case SCI_SMP_DEV_CMD:
+		/* The device does not process any UF received from the hardware while
+		 * in this state.  All unsolicited frames are forwarded to the io request
+		 * object.
+		 */
+		status = sci_io_request_frame_handler(idev->working_request, frame_index);
+		break;
+	}
+
+	return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+
+	switch (state) {
+	case SCI_DEV_READY:
+	case SCI_STP_DEV_IDLE:
+	case SCI_STP_DEV_CMD:
+	case SCI_STP_DEV_NCQ:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_AWAIT_RESET:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_SMP_DEV_CMD:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Called once the remote node context has transitioned to a ready
+ * state (after suspending RX and/or TX due to an early D2H FIS).
+ */
+static void atapi_remote_device_resume_done(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+	struct isci_request *ireq = idev->working_request;
+
+	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+						     u32 event_code)
+{
+	enum sci_status status;
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+
+	switch (scu_get_event_type(event_code)) {
+	case SCU_EVENT_TYPE_RNC_OPS_MISC:
+	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+		break;
+	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+			status = SCI_SUCCESS;
+
+			/* Suspend the associated RNC */
+			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+			dev_dbg(scirdev_to_dev(idev),
+				"%s: device: %p event code: %x: %s\n",
+				__func__, idev, event_code,
+				is_remote_device_ready(idev)
+				? "I_T_Nexus_Timeout event"
+				: "I_T_Nexus_Timeout event in wrong state");
+
+			break;
+		}
+	/* Else, fall through and treat as unhandled... */
+	default:
+		dev_dbg(scirdev_to_dev(idev),
+			"%s: device: %p event code: %x: %s\n",
+			__func__, idev, event_code,
+			is_remote_device_ready(idev)
+			? "unexpected event"
+			: "unexpected event in wrong state");
+		status = SCI_FAILURE_INVALID_STATE;
+		break;
+	}
+
+	if (status != SCI_SUCCESS)
+		return status;
+
+	/* Decode device-specific states that may require an RNC resume during
+	 * normal operation.  When the abort path is active, these resumes are
+	 * managed when the abort path exits.
+	 */
+	if (state == SCI_STP_DEV_ATAPI_ERROR) {
+		/* For ATAPI error state resume the RNC right away. */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+			return sci_remote_node_context_resume(&idev->rnc,
+							      atapi_remote_device_resume_done,
+							      idev);
+		}
+	}
+
+	if (state == SCI_STP_DEV_IDLE) {
+
+		/* Suspension events are handled specifically for this state;
+		 * resume the RNC right away.
+		 */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+	}
+
+	return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+						 struct isci_request *ireq,
+						 enum sci_status status)
+{
+	struct isci_port *iport = idev->owning_port;
+
+	/* cleanup requests that failed after starting on the port */
+	if (status != SCI_SUCCESS)
+		sci_port_complete_io(iport, idev, ireq);
+	else {
+		kref_get(&idev->kref);
+		idev->started_request_count++;
+	}
+}
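+
+/* A successfully started request holds a device reference (the kref_get()
+ * above) in addition to bumping started_request_count; both are dropped
+ * on the completion path (see the isci_put_device() at the end of
+ * sci_remote_device_complete_io()).
+ */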
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+						struct isci_remote_device *idev,
+						struct isci_request *ireq)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	struct isci_port *iport = idev->owning_port;
+	enum sci_status status;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_STOPPED:
+	case SCI_DEV_STARTING:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_DEV_STOPPING:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_RESETTING:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_DEV_READY:
+		/* attempt to start an io request for this device object. The remote
+		 * device object will issue the start request for the io and if
+		 * successful it will start the request for the port object then
+		 * increment its own request count.
+		 */
+		status = sci_port_start_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		status = sci_request_start(ireq);
+		break;
+	case SCI_STP_DEV_IDLE: {
+		/* Handle the start I/O operation for a SATA device that is in
+		 * the command idle state:
+		 * - evaluate the type of I/O request to be started;
+		 * - if it is an NCQ request, change to the NCQ substate;
+		 * - for any other command, change to the CMD substate.
+		 *
+		 * If this is a softreset we may want to have a different
+		 * substate.
+		 */
+		enum sci_remote_device_states new_state;
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		status = sci_port_start_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		status = sci_request_start(ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		if (task->ata_task.use_ncq)
+			new_state = SCI_STP_DEV_NCQ;
+		else {
+			idev->working_request = ireq;
+			new_state = SCI_STP_DEV_CMD;
+		}
+		sci_change_state(sm, new_state);
+		break;
+	}
+	case SCI_STP_DEV_NCQ: {
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		if (task->ata_task.use_ncq) {
+			status = sci_port_start_io(iport, idev, ireq);
+			if (status != SCI_SUCCESS)
+				return status;
+
+			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+			if (status != SCI_SUCCESS)
+				break;
+
+			status = sci_request_start(ireq);
+		} else
+			return SCI_FAILURE_INVALID_STATE;
+		break;
+	}
+	case SCI_STP_DEV_AWAIT_RESET:
+		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+	case SCI_SMP_DEV_IDLE:
+		status = sci_port_start_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		status = sci_request_start(ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		idev->working_request = ireq;
+		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+		break;
+	case SCI_STP_DEV_CMD:
+	case SCI_SMP_DEV_CMD:
+		/* The device is already handling a command; it cannot accept new
+		 * commands until this one is complete.
+		 */
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_remote_device_start_request(idev, ireq, status);
+	return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+					  struct isci_remote_device *idev,
+					  struct isci_request *ireq)
+{
+	enum sci_status status;
+
+	status = sci_request_complete(ireq);
+	if (status != SCI_SUCCESS)
+		return status;
+
+	status = sci_port_complete_io(iport, idev, ireq);
+	if (status != SCI_SUCCESS)
+		return status;
+
+	sci_remote_device_decrement_request_count(idev);
+	return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+						   struct isci_remote_device *idev,
+						   struct isci_request *ireq)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	struct isci_port *iport = idev->owning_port;
+	enum sci_status status;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_STOPPED:
+	case SCI_DEV_STARTING:
+	case SCI_STP_DEV_IDLE:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_DEV_READY:
+	case SCI_STP_DEV_AWAIT_RESET:
+	case SCI_DEV_RESETTING:
+		status = common_complete_io(iport, idev, ireq);
+		break;
+	case SCI_STP_DEV_CMD:
+	case SCI_STP_DEV_NCQ:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_ATAPI_ERROR:
+		status = common_complete_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+			/* This request caused a hardware error; the device needs a
+			 * LUN reset.  Force the state machine to the AWAIT_RESET state
+			 * so that the remaining I/Os reach the RNC state handler and
+			 * are completed by the RNC with a status of
+			 * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
+			 */
+			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+		} else if (idev->started_request_count == 0)
+			sci_change_state(sm, SCI_STP_DEV_IDLE);
+		break;
+	case SCI_SMP_DEV_CMD:
+		status = common_complete_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+		sci_change_state(sm, SCI_SMP_DEV_IDLE);
+		break;
+	case SCI_DEV_STOPPING:
+		status = common_complete_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			break;
+
+		if (idev->started_request_count == 0)
+			sci_remote_node_context_destruct(&idev->rnc,
+							 rnc_destruct_done,
+							 idev);
+		break;
+	}
+
+	if (status != SCI_SUCCESS)
+		dev_err(scirdev_to_dev(idev),
+			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+			"could not complete\n", __func__, iport,
+			idev, ireq, status);
+	else
+		isci_put_device(idev);
+
+	return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+	struct isci_remote_device *idev = dev;
+
+	/* we need to check if this request is still valid to continue. */
+	if (idev->working_request)
+		sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+						  struct isci_remote_device *idev,
+						  struct isci_request *ireq)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	struct isci_port *iport = idev->owning_port;
+	enum sci_status status;
+
+	switch (state) {
+	case SCI_DEV_INITIAL:
+	case SCI_DEV_STOPPED:
+	case SCI_DEV_STARTING:
+	case SCI_SMP_DEV_IDLE:
+	case SCI_SMP_DEV_CMD:
+	case SCI_DEV_STOPPING:
+	case SCI_DEV_FAILED:
+	case SCI_DEV_RESETTING:
+	case SCI_DEV_FINAL:
+	default:
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	case SCI_STP_DEV_IDLE:
+	case SCI_STP_DEV_CMD:
+	case SCI_STP_DEV_NCQ:
+	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_AWAIT_RESET:
+		status = sci_port_start_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		status = sci_request_start(ireq);
+		if (status != SCI_SUCCESS)
+			goto out;
+
+		/* Note: If the remote device state is not IDLE this will
+		 * replace the request that probably resulted in the task
+		 * management request.
+		 */
+		idev->working_request = ireq;
+		sci_change_state(sm, SCI_STP_DEV_CMD);
+
+		/* The remote node context must cleanup the TCi to NCQ mapping
+		 * table.  The only way to do this correctly is to either write
+		 * to the TLCR register or to invalidate and repost the RNC. In
+		 * either case the remote node context state machine will take
+		 * the correct action when the remote node context is suspended
+		 * and later resumed.
+		 */
+		sci_remote_device_suspend(idev,
+					  SCI_SW_SUSPEND_LINKHANG_DETECT);
+
+		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+				sci_remote_device_continue_request, idev);
+
+	out:
+		sci_remote_device_start_request(idev, ireq, status);
+		/* We need to let the controller start request handler know that
+		 * it can't post the TC yet.  We will provide a callback function
+		 * to post the TC when the RNC is resumed.
+		 */
+		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+	case SCI_DEV_READY:
+		status = sci_port_start_io(iport, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		/* Resume the RNC as needed: */
+		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+							    NULL, NULL);
+		if (status != SCI_SUCCESS)
+			break;
+
+		status = sci_request_start(ireq);
+		break;
+	}
+	sci_remote_device_start_request(idev, ireq, status);
+
+	return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+	struct isci_port *iport = idev->owning_port;
+	u32 context;
+
+	context = request |
+		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+		  idev->rnc.remote_node_index;
+
+	sci_controller_post_request(iport->owning_controller, context);
+}
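+
+/* The post context built above OR's the caller's request-type bits with
+ * the protocol engine group, the logical port index and (in the low
+ * bits) the remote node index; the controller decodes this single u32
+ * from its post queue to route the request.
+ */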
+
+/* Called once the remote node context has transitioned to a
+ * ready state.  This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+
+	if (is_remote_device_ready(idev))
+		return;
+
+	/* go 'ready' if we are not already in a ready state */
+	sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
+	 * As a result, avoid sending the ready notification.
+	 */
+	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+		isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+	/* Initial state is a transitional state to the stopped state */
+	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @idev: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource.  As such, they must be
+ * protected.  Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant.
+ *
+ * Returns:
+ * SCI_SUCCESS - the device was successfully destructed.
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE - the supplied device isn't valid
+ * (e.g. it has already been destroyed, the handle isn't valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	struct isci_host *ihost;
+
+	if (state != SCI_DEV_STOPPED) {
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	ihost = idev->owning_port->owning_controller;
+	sci_controller_free_remote_node_context(ihost, idev,
+						     idev->rnc.remote_node_index);
+	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+	sci_change_state(sm, SCI_DEV_FINAL);
+
+	return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
+
+	/* There should not be any outstanding I/Os.  All paths to
+	 * here should go through isci_remote_device_nuke_requests.
+	 * If we hit this condition, we will need a way to complete
+	 * I/O requests in progress.
+	 */
+	BUG_ON(idev->started_request_count > 0);
+
+	sci_remote_device_destruct(idev);
+	list_del_init(&idev->node);
+	isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	u32 prev_state;
+
+	/* If we are entering from the stopping state let the SCI User know that
+	 * the stop operation has completed.
+	 */
+	prev_state = idev->sm.previous_state_id;
+	if (prev_state == SCI_DEV_STOPPING)
+		isci_remote_device_deconstruct(ihost, idev);
+
+	sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	isci_remote_device_not_ready(ihost, idev,
+				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	struct domain_device *dev = idev->domain_dev;
+
+	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+	} else if (dev_is_expander(dev)) {
+		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+	} else
+		isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct domain_device *dev = idev->domain_dev;
+
+	if (dev->dev_type == SAS_END_DEVICE) {
+		struct isci_host *ihost = idev->owning_port->owning_controller;
+
+		isci_remote_device_not_ready(ihost, idev,
+					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+	}
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
+
+	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
+
+	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+	idev->working_request = NULL;
+	if (sci_remote_node_context_is_ready(&idev->rnc)) {
+		/*
+		 * Since the RNC is ready, it's alright to finish completion
+		 * processing (e.g. signal the remote device is ready). */
+		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+	} else {
+		sci_remote_node_context_resume(&idev->rnc,
+			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+			idev);
+	}
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	BUG_ON(idev->working_request == NULL);
+
+	isci_remote_device_not_ready(ihost, idev,
+				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+		isci_remote_device_not_ready(ihost, idev,
+					     idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	BUG_ON(idev->working_request == NULL);
+
+	isci_remote_device_not_ready(ihost, idev,
+				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+	idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+	[SCI_DEV_INITIAL] = {
+		.enter_state = sci_remote_device_initial_state_enter,
+	},
+	[SCI_DEV_STOPPED] = {
+		.enter_state = sci_remote_device_stopped_state_enter,
+	},
+	[SCI_DEV_STARTING] = {
+		.enter_state = sci_remote_device_starting_state_enter,
+	},
+	[SCI_DEV_READY] = {
+		.enter_state = sci_remote_device_ready_state_enter,
+		.exit_state  = sci_remote_device_ready_state_exit
+	},
+	[SCI_STP_DEV_IDLE] = {
+		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+	},
+	[SCI_STP_DEV_CMD] = {
+		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+	},
+	[SCI_STP_DEV_NCQ] = { },
+	[SCI_STP_DEV_NCQ_ERROR] = {
+		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+	},
+	[SCI_STP_DEV_ATAPI_ERROR] = { },
+	[SCI_STP_DEV_AWAIT_RESET] = { },
+	[SCI_SMP_DEV_IDLE] = {
+		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+	},
+	[SCI_SMP_DEV_CMD] = {
+		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
+	},
+	[SCI_DEV_STOPPING] = { },
+	[SCI_DEV_FAILED] = { },
+	[SCI_DEV_RESETTING] = {
+		.enter_state = sci_remote_device_resetting_state_enter,
+		.exit_state  = sci_remote_device_resetting_state_exit
+	},
+	[SCI_DEV_FINAL] = { },
+};
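+
+/* The table above is driven by the sci_base_state_machine core:
+ * sci_change_state() runs the current state's .exit_state handler (if
+ * any), records previous_state_id, updates current_state_id, and then
+ * runs the new state's .enter_state handler.  A minimal sketch of that
+ * dispatch (illustrative, assuming the sm field names used throughout
+ * this file; see the driver's state machine core for the real code):
+ *
+ *	void change_state(struct sci_base_state_machine *sm, u32 next)
+ *	{
+ *		const struct sci_base_state *tbl = sm->state_table;
+ *
+ *		if (tbl[sm->current_state_id].exit_state)
+ *			tbl[sm->current_state_id].exit_state(sm);
+ *		sm->previous_state_id = sm->current_state_id;
+ *		sm->current_state_id = next;
+ *		if (tbl[next].enter_state)
+ *			tbl[next].enter_state(sm);
+ *	}
+ */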
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+				  struct isci_remote_device *idev)
+{
+	idev->owning_port = iport;
+	idev->started_request_count = 0;
+
+	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+	sci_remote_node_context_construct(&idev->rnc,
+					       SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object.  Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+						       struct isci_remote_device *idev)
+{
+	enum sci_status status;
+	struct sci_port_properties properties;
+
+	sci_remote_device_construct(iport, idev);
+
+	sci_port_get_properties(iport, &properties);
+	/* Get accurate port width from port's phy mask for a DA device. */
+	idev->device_port_width = hweight32(properties.phy_mask);
+
+	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+							     idev,
+							     &idev->rnc.remote_node_index);
+
+	if (status != SCI_SUCCESS)
+		return status;
+
+	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+						       struct isci_remote_device *idev)
+{
+	struct domain_device *dev = idev->domain_dev;
+	enum sci_status status;
+
+	sci_remote_device_construct(iport, idev);
+
+	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+								  idev,
+								  &idev->rnc.remote_node_index);
+	if (status != SCI_SUCCESS)
+		return status;
+
+	/* For SAS-2 the physical link rate is actually a logical link
+	 * rate that incorporates multiplexing.  The SCU doesn't
+	 * incorporate multiplexing and for the purposes of the
+	 * connection the logical link rate is the same as the
+	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
+	 * one another, so this code works for both situations.
+	 */
+	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+					 dev->linkrate);
+
+	/* TODO: Should the port width be assigned by reading all of the phys on the port? */
+	idev->device_port_width = 1;
+
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_resume(
+	struct isci_remote_device *idev,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p)
+{
+	enum sci_status status;
+
+	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
+	if (status != SCI_SUCCESS)
+		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
+			__func__, status);
+	return status;
+}
+
+static void isci_remote_device_resume_from_abort_complete(void *cbparam)
+{
+	struct isci_remote_device *idev = cbparam;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	scics_sds_remote_node_context_callback abort_resume_cb =
+		idev->abort_resume_cb;
+
+	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
+		__func__, abort_resume_cb);
+
+	if (abort_resume_cb != NULL) {
+		idev->abort_resume_cb = NULL;
+		abort_resume_cb(idev->abort_resume_cbparam);
+	}
+	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+	wake_up(&ihost->eventq);
+}
+
+static bool isci_remote_device_test_resume_done(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	bool done;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
+		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
+		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return done;
+}
+
+void isci_remote_device_wait_for_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
+		 __func__, idev);
+
+	#define MAX_RESUME_MSECS 10000
+	if (!wait_event_timeout(ihost->eventq,
+				isci_remote_device_test_resume_done(ihost, idev),
+				msecs_to_jiffies(MAX_RESUME_MSECS))) {
+
+		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
+			 "resume: %p\n", __func__, idev);
+	}
+	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
+		 __func__, idev);
+}
+
+enum sci_status isci_remote_device_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	enum sci_status status = SCI_SUCCESS;
+	int destroyed;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	/* Preserve any current resume callbacks, for instance from other
+	 * resumptions.
+	 */
+	idev->abort_resume_cb = idev->rnc.user_callback;
+	idev->abort_resume_cbparam = idev->rnc.user_cookie;
+	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
+	if (!destroyed)
+		status = sci_remote_device_resume(
+			idev, isci_remote_device_resume_from_abort_complete,
+			idev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+	if (!destroyed && (status == SCI_SUCCESS))
+		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
+	else
+		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+	return status;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ *    device.  This method enables normal IO requests to flow through to the
+ *    remote device.
+ * @idev: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    start operation should complete.
+ *
+ * Returns:
+ * SCI_SUCCESS - the device was successfully started.
+ * SCI_FAILURE_INVALID_PHY - the user attempted to start the device when no
+ * phys have been added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+					       u32 timeout)
+{
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;
+	enum sci_status status;
+
+	if (state != SCI_DEV_STOPPED) {
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	status = sci_remote_device_resume(idev, remote_device_resume_done,
+					  idev);
+	if (status != SCI_SUCCESS)
+		return status;
+
+	sci_change_state(sm, SCI_DEV_STARTING);
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+						    struct isci_remote_device *idev)
+{
+	struct isci_host *ihost = iport->isci_host;
+	struct domain_device *dev = idev->domain_dev;
+	enum sci_status status;
+
+	if (dev->parent && dev_is_expander(dev->parent))
+		status = sci_remote_device_ea_construct(iport, idev);
+	else
+		status = sci_remote_device_da_construct(iport, idev);
+
+	if (status != SCI_SUCCESS) {
+		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+			__func__, status);
+
+		return status;
+	}
+
+	/* start the device. */
+	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+	if (status != SCI_SUCCESS)
+		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+			 status);
+
+	return status;
+}
+
+/**
+ * isci_remote_device_alloc() - This function allocates an isci_remote_device
+ *    when a libsas dev_found message is received.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
+ *
+ * Returns a pointer to the new isci_remote_device, or NULL if none is
+ * available.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+	struct isci_remote_device *idev;
+	int i;
+
+	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+		idev = &ihost->devices[i];
+		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+			break;
+	}
+
+	if (i >= SCI_MAX_REMOTE_DEVICES) {
+		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+		return NULL;
+	}
+	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+		return NULL;
+
+	return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+	struct isci_host *ihost = idev->isci_port->isci_host;
+
+	idev->domain_dev = NULL;
+	idev->isci_port = NULL;
+	clear_bit(IDEV_START_PENDING, &idev->flags);
+	clear_bit(IDEV_STOP_PENDING, &idev->flags);
+	clear_bit(IDEV_IO_READY, &idev->flags);
+	clear_bit(IDEV_GONE, &idev->flags);
+	smp_mb__before_atomic();
+	clear_bit(IDEV_ALLOCATED, &idev->flags);
+	wake_up(&ihost->eventq);
+}
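+
+/* The smp_mb__before_atomic() above orders these writes: a concurrent
+ * isci_remote_device_alloc() that wins test_and_set_bit(IDEV_ALLOCATED)
+ * must observe a fully quiesced device (domain_dev and isci_port NULL,
+ * all pending bits clear) before handing it out again.
+ */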
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ *    remote device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ *
+ * The status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+	enum sci_status status;
+	unsigned long flags;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+	set_bit(IDEV_GONE, &idev->flags);
+
+	set_bit(IDEV_STOP_PENDING, &idev->flags);
+	status = sci_remote_device_stop(idev, 50);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	/* Wait for the stop complete callback. */
+	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+		/* nothing to wait for */;
+	else
+		wait_for_device_stop(ihost, idev);
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p, waiting done.\n", __func__, idev);
+
+	return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ *    device is removed.
+ * @dev: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev = dev->lldd_dev;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+		__func__, dev, idev, idev->isci_port);
+
+	isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ *    device is discovered.  A remote device object is created and started; the
+ *    function then sleeps until the sci core device started message is
+ *    received.
+ * @dev: This parameter specifies the libsas domain device.
+ *
+ * Returns zero on success, -ENODEV on failure.
+ */
+int isci_remote_device_found(struct domain_device *dev)
+{
+	struct isci_host *isci_host = dev_to_ihost(dev);
+	struct isci_port *isci_port = dev->port->lldd_port;
+	struct isci_remote_device *isci_device;
+	enum sci_status status;
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: domain_device = %p\n", __func__, dev);
+
+	if (!isci_port)
+		return -ENODEV;
+
+	isci_device = isci_remote_device_alloc(isci_host, isci_port);
+	if (!isci_device)
+		return -ENODEV;
+
+	kref_init(&isci_device->kref);
+	INIT_LIST_HEAD(&isci_device->node);
+
+	spin_lock_irq(&isci_host->scic_lock);
+	isci_device->domain_dev = dev;
+	isci_device->isci_port = isci_port;
+	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+	set_bit(IDEV_START_PENDING, &isci_device->flags);
+	status = isci_remote_device_construct(isci_port, isci_device);
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: isci_device = %p\n",
+		__func__, isci_device);
+
+	if (status == SCI_SUCCESS) {
+		/* device came up, advertise it to the world */
+		dev->lldd_dev = isci_device;
+	} else
+		isci_put_device(isci_device);
+	spin_unlock_irq(&isci_host->scic_lock);
+
+	/* wait for the device ready callback. */
+	wait_for_device_start(isci_host, isci_device);
+
+	return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+
+enum sci_status isci_remote_device_suspend_terminate(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq)
+{
+	unsigned long flags;
+	enum sci_status status;
+
+	/* Put the device into suspension. */
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	/* Terminate and wait for the completions. */
+	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
+	if (status != SCI_SUCCESS)
+		dev_dbg(&ihost->pdev->dev,
+			"%s: isci_remote_device_terminate_requests(%p) "
+				"returned %d!\n",
+			__func__, idev, status);
+
+	/* NOTE: RNC resumption is left to the caller! */
+	return status;
+}
+
+int isci_remote_device_is_safe_to_abort(
+	struct isci_remote_device *idev)
+{
+	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
+}
+
+enum sci_status sci_remote_device_abort_requests_pending_abort(
+	struct isci_remote_device *idev)
+{
+	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
+}
+
+enum sci_status isci_remote_device_reset_complete(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	enum sci_status status;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	status = sci_remote_device_reset_complete(idev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return status;
+}
+
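+/* Enable or disable the RNC low-level hang detection timer for SATA
+ * devices.  The test_and_set/test_and_clear pair below makes the call
+ * idempotent: a repeated enable or disable returns early instead of
+ * reprogramming the port's hang detection timeout.
+ */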
+void isci_dev_set_hang_detection_timeout(
+	struct isci_remote_device *idev,
+	u32 timeout)
+{
+	if (dev_is_sata(idev->domain_dev)) {
+		if (timeout) {
+			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
+					     &idev->flags))
+				return;  /* Already enabled. */
+		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
+					       &idev->flags))
+			return;  /* Not enabled. */
+
+		sci_port_set_hang_detection_timeout(idev->owning_port,
+						    timeout);
+	}
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 0000000..47a013f
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,387 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+	SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+	SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+	SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+	SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+	SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+	SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag, so for unaccelerated
+ *                   protocols we need a method to associate unsolicited
+ *                   frames with a pending request
+ */
+struct isci_remote_device {
+	#define IDEV_START_PENDING 0
+	#define IDEV_STOP_PENDING 1
+	#define IDEV_ALLOCATED 2
+	#define IDEV_GONE 3
+	#define IDEV_IO_READY 4
+	#define IDEV_IO_NCQERROR 5
+	#define IDEV_RNC_LLHANG_ENABLED 6
+	#define IDEV_ABORT_PATH_ACTIVE 7
+	#define IDEV_ABORT_PATH_RESUME_PENDING 8
+	unsigned long flags;
+	struct kref kref;
+	struct isci_port *isci_port;
+	struct domain_device *domain_dev;
+	struct list_head node;
+	struct sci_base_state_machine sm;
+	u32 device_port_width;
+	enum sas_linkrate connection_rate;
+	struct isci_port *owning_port;
+	struct sci_remote_node_context rnc;
+	/* XXX unify with device reference counting and delete */
+	u32 started_request_count;
+	struct isci_request *working_request;
+	u32 not_ready_reason;
+	scics_sds_remote_node_context_callback abort_resume_cb;
+	void *abort_resume_cbparam;
+};
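+
+/* The IDEV_* values above are bit numbers rather than masks: they are
+ * used with the atomic bitops on ->flags, e.g.
+ * set_bit(IDEV_GONE, &idev->flags) or test_bit(IDEV_IO_READY, &idev->flags).
+ */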
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_get_device(
+	struct isci_remote_device *idev)
+{
+	if (idev)
+		kref_get(&idev->kref);
+	return idev;
+}
+
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+	struct isci_remote_device *idev = dev->lldd_dev;
+
+	if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+		kref_get(&idev->kref);
+		return idev;
+	}
+
+	return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+	if (idev)
+		kref_put(&idev->kref, isci_remote_device_release);
+}
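+
+/* A typical lookup/use/put sequence, per the locking rule above (an
+ * illustrative sketch; "dev" stands for any libsas domain_device
+ * pointer, e.g. from a sas_task):
+ *
+ *	spin_lock_irqsave(&ihost->scic_lock, flags);
+ *	idev = isci_lookup_device(dev);
+ *	if (idev) {
+ *		...			// operate on idev
+ *		isci_put_device(idev);
+ *	}
+ *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ */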
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+					struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+				      struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ *    reception of link activity for the supplied remote device.  This method
+ *    disables normal IO requests from flowing through to the remote device.
+ * @idev: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * Returns:
+ * SCI_SUCCESS - transmission and reception for the device were successfully
+ * stopped.
+ */
+enum sci_status sci_remote_device_stop(
+	struct isci_remote_device *idev,
+	u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ *    ready for operation. This method must be called any time the device is
+ *    reset either through an SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset.  It
+ * resets the software object so that it will be operational after a device
+ * hardware reset completes.
+ *
+ * Return: an indication of whether the device reset was accepted.
+ * SCI_SUCCESS is returned if the device reset is started.
+ */
+enum sci_status sci_remote_device_reset(
+	struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ *    that the reset operation is complete and the device can resume operation
+ *    again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ *    of the reset complete operation.
+ *
+ * Return: an indication that the device is resuming operation.
+ * SCI_SUCCESS indicates the device is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+	struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ *    for the common remote device state machine.
+ * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
+ * state machine.
+ *
+ * @SCI_DEV_STOPPED: This state indicates that the remote device has
+ * successfully been stopped.  In this state no new IO operations are
+ * permitted.  This state is entered from the INITIAL and STOPPING
+ * states.
+ *
+ * @SCI_DEV_STARTING: This state indicates that the remote device is in
+ * the process of becoming ready (i.e. starting).  In this state no new
+ * IO operations are permitted.  This state is entered from the STOPPED
+ * state.
+ *
+ * @SCI_DEV_READY: This state indicates the remote device is now ready.
+ * Thus, the user is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ *
+ * @SCI_STP_DEV_IDLE: This is the idle substate for the STP remote
+ * device.  When there is no active IO for the device it is in this
+ * state.
+ *
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
+ * device.  This state is entered when the device is processing a
+ * non-NCQ command.  The device object will fail any new start IO
+ * requests until this command is complete.
+ *
+ * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
+ * This state is entered when the device is processing an NCQ request.
+ * It will remain in this state as long as one or more NCQ requests are
+ * being processed.
+ *
+ * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
+ * remote device.  This state is entered when an SDB error FIS is
+ * received by the device object while in the NCQ state.  The device
+ * object will only accept a READ LOG command while in this state.
+ *
+ * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
+ * ATAPI remote device.  This state is entered when the ATAPI device
+ * sends an error status FIS without data while the device object is in
+ * the CMD state.  A suspension event is expected in this state.  The
+ * device object will resume right away.
+ *
+ * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the
+ * device is waiting for a RESET task in order to recover from certain
+ * hardware-specific errors.
+ *
+ * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
+ * remote device.  This is the normal operational state for a remote
+ * device.
+ *
+ * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
+ * This is the state that the device is placed in when an RNC suspend is
+ * received by the SCU hardware.
+ *
+ * @SCI_DEV_STOPPING: This state indicates that the remote device is in
+ * the process of stopping.  In this state no new IO operations are
+ * permitted, but existing IO operations are allowed to complete.  This
+ * state is entered from the READY state.  This state is entered from
+ * the FAILED state.
+ *
+ * @SCI_DEV_FAILED: This state indicates that the remote device has
+ * failed.  In this state no new IO operations are permitted.  This
+ * state is entered from the INITIALIZING and READY states.
+ *
+ * @SCI_DEV_RESETTING: This state indicates the device is being reset.
+ * In this state no new IO operations are permitted.  This state is
+ * entered from the READY state.
+ *
+ * @SCI_DEV_FINAL: Simply the final state for the base remote device
+ * state machine.
+ */
+#define REMOTE_DEV_STATES {\
+	C(DEV_INITIAL),\
+	C(DEV_STOPPED),\
+	C(DEV_STARTING),\
+	C(DEV_READY),\
+	C(STP_DEV_IDLE),\
+	C(STP_DEV_CMD),\
+	C(STP_DEV_NCQ),\
+	C(STP_DEV_NCQ_ERROR),\
+	C(STP_DEV_ATAPI_ERROR),\
+	C(STP_DEV_AWAIT_RESET),\
+	C(SMP_DEV_IDLE),\
+	C(SMP_DEV_CMD),\
+	C(DEV_STOPPING),\
+	C(DEV_FAILED),\
+	C(DEV_RESETTING),\
+	C(DEV_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_remote_device_states REMOTE_DEV_STATES;
+#undef C
+const char *dev_state_name(enum sci_remote_device_states state);
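+
+/*
+ * Note: REMOTE_DEV_STATES is an X-macro list.  With C(a) defined as SCI_##a
+ * (as above) it expands to the state enumerators; a .c file can redefine
+ * C(a) as (#a) to build the matching name table behind dev_state_name().
+ * A sketch of the expected expansion, mirroring rnc_state_name():
+ *
+ *	#undef C
+ *	#define C(a) (#a)
+ *	static const char * const strings[] = REMOTE_DEV_STATES;
+ *	#undef C
+ */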
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+	struct isci_remote_device *idev;
+
+	idev = container_of(rnc, typeof(*idev), rnc);
+
+	return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+	return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+	/* XXX delete this voodoo when converting to the top-level device
+	 * reference count
+	 */
+	if (!WARN_ONCE(idev->started_request_count == 0,
+		       "%s: tried to decrement started_request_count past 0!?",
+		       __func__))
+		idev->started_request_count--;
+}
+
+void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
+
+enum sci_status sci_remote_device_frame_handler(
+	struct isci_remote_device *idev,
+	u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+	struct isci_remote_device *idev,
+	u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+void sci_remote_device_post_request(
+	struct isci_remote_device *idev,
+	u32 request);
+
+enum sci_status sci_remote_device_terminate_requests(
+	struct isci_remote_device *idev);
+
+int isci_remote_device_is_safe_to_abort(
+	struct isci_remote_device *idev);
+
+enum sci_status
+sci_remote_device_abort_requests_pending_abort(
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status sci_remote_device_resume(
+	struct isci_remote_device *idev,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p);
+
+enum sci_status isci_remote_device_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset_complete(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend_terminate(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status isci_remote_device_terminate_requests(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+					  enum sci_remote_node_suspension_reasons reason);
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 0000000..00602ab
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,812 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas_ata.h>
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+#undef C
+#define C(a) (#a)
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+{
+	static const char * const strings[] = RNC_STATES;
+
+	if (state >= ARRAY_SIZE(strings))
+		return "UNKNOWN";
+
+	return strings[state];
+}
+#undef C
+
+/**
+ * sci_remote_node_context_is_ready() - check for the READY state
+ * @sci_rnc: The remote node context object to check.
+ *
+ * This method checks whether the remote node context is in the READY state.
+ *
+ * Return: true if the remote node context is in the ready state; false
+ * otherwise.
+ */
+bool sci_remote_node_context_is_ready(
+	struct sci_remote_node_context *sci_rnc)
+{
+	return sci_rnc->sm.current_state_id == SCI_RNC_READY;
+}
+
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
+{
+	return sci_rnc->sm.current_state_id == SCI_RNC_TX_RX_SUSPENDED;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+	if (id < ihost->remote_node_entries &&
+	    ihost->device_table[id])
+		return &ihost->remote_node_context_table[id];
+
+	return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+	struct domain_device *dev = idev->domain_dev;
+	int rni = sci_rnc->remote_node_index;
+	union scu_remote_node_context *rnc;
+	struct isci_host *ihost;
+	__le64 sas_addr;
+
+	ihost = idev->owning_port->owning_controller;
+	rnc = sci_rnc_by_id(ihost, rni);
+
+	memset(rnc, 0, sizeof(union scu_remote_node_context)
+		* sci_remote_device_node_count(idev));
+
+	rnc->ssp.remote_node_index = rni;
+	rnc->ssp.remote_node_port_width = idev->device_port_width;
+	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+	/* sas address is __be64, context ram format is __le64 */
+	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+	rnc->ssp.nexus_loss_timer_enable = true;
+	rnc->ssp.check_bit               = false;
+	rnc->ssp.is_valid                = false;
+	rnc->ssp.is_remote_node_context  = true;
+	rnc->ssp.function_number         = 0;
+
+	rnc->ssp.arbitration_wait_time = 0;
+
+	if (dev_is_sata(dev)) {
+		rnc->ssp.connection_occupancy_timeout =
+			ihost->user_parameters.stp_max_occupancy_timeout;
+		rnc->ssp.connection_inactivity_timeout =
+			ihost->user_parameters.stp_inactivity_timeout;
+	} else {
+		rnc->ssp.connection_occupancy_timeout  =
+			ihost->user_parameters.ssp_max_occupancy_timeout;
+		rnc->ssp.connection_inactivity_timeout =
+			ihost->user_parameters.ssp_inactivity_timeout;
+	}
+
+	rnc->ssp.initial_arbitration_wait_time = 0;
+
+	/* Open Address Frame Parameters */
+	rnc->ssp.oaf_connection_rate = idev->connection_rate;
+	rnc->ssp.oaf_features = 0;
+	rnc->ssp.oaf_source_zone_group = 0;
+	rnc->ssp.oaf_more_compatibility_features = 0;
+}
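+
+/*
+ * Endianness note (illustrative): dev->sas_addr holds the big-endian wire
+ * format; SAS_ADDR() yields the cpu-order value, and cpu_to_le64() above
+ * re-encodes it in the little-endian layout the context RAM expects before
+ * it is split into hi/lo 32-bit halves.
+ */
+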
+/**
+ * sci_remote_node_context_setup_to_resume() - record a resume destination
+ * @sci_rnc: The remote node context to set up.
+ * @callback: The user callback to invoke once the transition completes.
+ * @callback_parameter: The cookie passed to @callback.
+ * @dest_param: The destination state to record.
+ *
+ * This method sets up the remote node context object so that it will
+ * transition to its ready state.  If the remote node context is already set
+ * up to transition to its final state then this function does nothing.
+ */
+static void sci_remote_node_context_setup_to_resume(
+	struct sci_remote_node_context *sci_rnc,
+	scics_sds_remote_node_context_callback callback,
+	void *callback_parameter,
+	enum sci_remote_node_context_destination_state dest_param)
+{
+	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
+		sci_rnc->destination_state = dest_param;
+		if (callback != NULL) {
+			sci_rnc->user_callback = callback;
+			sci_rnc->user_cookie   = callback_parameter;
+		}
+	}
+}
+
+static void sci_remote_node_context_setup_to_destroy(
+	struct sci_remote_node_context *sci_rnc,
+	scics_sds_remote_node_context_callback callback,
+	void *callback_parameter)
+{
+	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
+
+	sci_rnc->destination_state = RNC_DEST_FINAL;
+	sci_rnc->user_callback     = callback;
+	sci_rnc->user_cookie       = callback_parameter;
+
+	wake_up(&ihost->eventq);
+}
+
+/**
+ * sci_remote_node_context_notify_user() - invoke and clear the user callback
+ * @rnc: The remote node context whose callback is to be invoked.
+ *
+ * This method calls the user callback function, if set, and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+	struct sci_remote_node_context *rnc)
+{
+	if (rnc->user_callback != NULL) {
+		(*rnc->user_callback)(rnc->user_cookie);
+
+		rnc->user_callback = NULL;
+		rnc->user_cookie = NULL;
+	}
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+	switch (rnc->destination_state) {
+	case RNC_DEST_READY:
+	case RNC_DEST_SUSPENDED_RESUME:
+		rnc->destination_state = RNC_DEST_READY;
+		/* Fall through... */
+	case RNC_DEST_FINAL:
+		sci_remote_node_context_resume(rnc, rnc->user_callback,
+					       rnc->user_cookie);
+		break;
+	default:
+		rnc->destination_state = RNC_DEST_UNSPECIFIED;
+		break;
+	}
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+	union scu_remote_node_context *rnc_buffer;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+	struct domain_device *dev = idev->domain_dev;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+	rnc_buffer->ssp.is_valid = true;
+
+	if (dev_is_sata(dev) && dev->parent) {
+		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+	} else {
+		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+		if (!dev->parent)
+			sci_port_setup_transports(idev->owning_port,
+						  sci_rnc->remote_node_index);
+	}
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+	union scu_remote_node_context *rnc_buffer;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+	rnc_buffer->ssp.is_valid = false;
+
+	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	/* Check to see if we have gotten back to the initial state because
+	 * someone requested to destroy the remote node context object.
+	 */
+	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+		rnc->destination_state = RNC_DEST_UNSPECIFIED;
+		sci_remote_node_context_notify_user(rnc);
+
+		smp_wmb();
+		wake_up(&ihost->eventq);
+	}
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+	sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+	/* Terminate all outstanding requests. */
+	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
+	sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev;
+	struct domain_device *dev;
+
+	idev = rnc_to_dev(rnc);
+	dev = idev->domain_dev;
+
+	/*
+	 * For direct attached SATA devices we need to clear the TLCR
+	 * NCQ to TCi tag mapping on the phy and in cases where we
+	 * resume because of a target reset we also need to update
+	 * the STPTLDARNI register with the RNi of the device
+	 */
+	if (dev_is_sata(dev) && !dev->parent)
+		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
+
+	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	enum sci_remote_node_context_destination_state dest_select;
+	int tell_user = 1;
+
+	dest_select = rnc->destination_state;
+	rnc->destination_state = RNC_DEST_UNSPECIFIED;
+
+	if ((dest_select == RNC_DEST_SUSPENDED) ||
+	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
+		sci_remote_node_context_suspend(
+			rnc, rnc->suspend_reason,
+			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
+
+		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
+			tell_user = 0;  /* Wait until ready again. */
+	}
+	if (tell_user)
+		sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+	sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	u32 new_count = rnc->suspend_count + 1;
+
+	if (new_count == 0)
+		rnc->suspend_count = 1;
+	else
+		rnc->suspend_count = new_count;
+	smp_wmb();
+
+	/* Terminate outstanding requests pending abort. */
+	sci_remote_device_abort_requests_pending_abort(idev);
+
+	wake_up(&ihost->eventq);
+	sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_await_suspend_state_exit(
+	struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc
+		= container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+
+	if (dev_is_sata(idev->domain_dev))
+		isci_dev_set_hang_detection_timeout(idev, 0);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+	[SCI_RNC_INITIAL] = {
+		.enter_state = sci_remote_node_context_initial_state_enter,
+	},
+	[SCI_RNC_POSTING] = {
+		.enter_state = sci_remote_node_context_posting_state_enter,
+	},
+	[SCI_RNC_INVALIDATING] = {
+		.enter_state = sci_remote_node_context_invalidating_state_enter,
+	},
+	[SCI_RNC_RESUMING] = {
+		.enter_state = sci_remote_node_context_resuming_state_enter,
+	},
+	[SCI_RNC_READY] = {
+		.enter_state = sci_remote_node_context_ready_state_enter,
+	},
+	[SCI_RNC_TX_SUSPENDED] = {
+		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
+	},
+	[SCI_RNC_TX_RX_SUSPENDED] = {
+		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+	},
+	[SCI_RNC_AWAIT_SUSPENSION] = {
+		.exit_state = sci_remote_node_context_await_suspend_state_exit,
+	},
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+					    u16 remote_node_index)
+{
+	memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+	rnc->remote_node_index = remote_node_index;
+	rnc->destination_state = RNC_DEST_UNSPECIFIED;
+
+	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+							   u32 event_code)
+{
+	enum scis_sds_remote_node_context_states state;
+	u32 next_state;
+
+	state = sci_rnc->sm.current_state_id;
+	switch (state) {
+	case SCI_RNC_POSTING:
+		switch (scu_get_event_code(event_code)) {
+		case SCU_EVENT_POST_RNC_COMPLETE:
+			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case SCI_RNC_INVALIDATING:
+		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+			if (sci_rnc->destination_state == RNC_DEST_FINAL)
+				next_state = SCI_RNC_INITIAL;
+			else
+				next_state = SCI_RNC_POSTING;
+			sci_change_state(&sci_rnc->sm, next_state);
+		} else {
+			switch (scu_get_event_type(event_code)) {
+			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+				/* We really don't care if the hardware is going to suspend
+				 * the device since it's being invalidated anyway */
+				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+					"%s: SCIC Remote Node Context 0x%p was "
+					"suspended by hardware while being "
+					"invalidated.\n", __func__, sci_rnc);
+				break;
+			default:
+				goto out;
+			}
+		}
+		break;
+	case SCI_RNC_RESUMING:
+		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+		} else {
+			switch (scu_get_event_type(event_code)) {
+			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+				/* We really don't care if the hardware is going to suspend
+				 * the device since it's being resumed anyway */
+				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+					"%s: SCIC Remote Node Context 0x%p was "
+					"suspended by hardware while being resumed.\n",
+					__func__, sci_rnc);
+				break;
+			default:
+				goto out;
+			}
+		}
+		break;
+	case SCI_RNC_READY:
+		switch (scu_get_event_type(event_code)) {
+		case SCU_EVENT_TL_RNC_SUSPEND_TX:
+			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+			sci_rnc->suspend_type = scu_get_event_type(event_code);
+			break;
+		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+			sci_rnc->suspend_type = scu_get_event_type(event_code);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case SCI_RNC_AWAIT_SUSPENSION:
+		switch (scu_get_event_type(event_code)) {
+		case SCU_EVENT_TL_RNC_SUSPEND_TX:
+			next_state = SCI_RNC_TX_SUSPENDED;
+			break;
+		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+			next_state = SCI_RNC_TX_RX_SUSPENDED;
+			break;
+		default:
+			goto out;
+		}
+		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
+			sci_change_state(&sci_rnc->sm, next_state);
+		break;
+	default:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state: %s\n", __func__,
+			 rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+	return SCI_SUCCESS;
+
+ out:
+	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+		 "%s: code: %#x state: %s\n", __func__, event_code,
+		 rnc_state_name(state));
+	return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+						      scics_sds_remote_node_context_callback cb_fn,
+						      void *cb_p)
+{
+	enum scis_sds_remote_node_context_states state;
+
+	state = sci_rnc->sm.current_state_id;
+	switch (state) {
+	case SCI_RNC_INVALIDATING:
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+		return SCI_SUCCESS;
+	case SCI_RNC_POSTING:
+	case SCI_RNC_RESUMING:
+	case SCI_RNC_READY:
+	case SCI_RNC_TX_SUSPENDED:
+	case SCI_RNC_TX_RX_SUSPENDED:
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+		return SCI_SUCCESS;
+	case SCI_RNC_AWAIT_SUSPENSION:
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+		return SCI_SUCCESS;
+	case SCI_RNC_INITIAL:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state: %s\n", __func__,
+			 rnc_state_name(state));
+		/* We have decided that the destruct request on the remote node
+		 * context can not fail, since it is either in the
+		 * initial/destroyed state or can otherwise be destroyed.
+		 */
+		return SCI_SUCCESS;
+	default:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_remote_node_context_suspend(
+			struct sci_remote_node_context *sci_rnc,
+			enum sci_remote_node_suspension_reasons suspend_reason,
+			u32 suspend_type)
+{
+	enum scis_sds_remote_node_context_states state
+		= sci_rnc->sm.current_state_id;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+	enum sci_status status = SCI_FAILURE_INVALID_STATE;
+	enum sci_remote_node_context_destination_state dest_param =
+		RNC_DEST_UNSPECIFIED;
+
+	dev_dbg(scirdev_to_dev(idev),
+		"%s: current state %s, current suspend_type %x dest state %d,"
+			" arg suspend_reason %d, arg suspend_type %x",
+		__func__, rnc_state_name(state), sci_rnc->suspend_type,
+		sci_rnc->destination_state, suspend_reason,
+		suspend_type);
+
+	/* Disable automatic state continuations if explicitly suspending. */
+	if ((suspend_reason == SCI_HW_SUSPEND) ||
+	    (sci_rnc->destination_state == RNC_DEST_FINAL))
+		dest_param = sci_rnc->destination_state;
+
+	switch (state) {
+	case SCI_RNC_READY:
+		break;
+	case SCI_RNC_INVALIDATING:
+		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
+			dev_warn(scirdev_to_dev(idev),
+				 "%s: already destroying %p\n",
+				 __func__, sci_rnc);
+			return SCI_FAILURE_INVALID_STATE;
+		}
+		/* Fall through and handle like SCI_RNC_POSTING */
+	case SCI_RNC_RESUMING:
+		/* Fall through and handle like SCI_RNC_POSTING */
+	case SCI_RNC_POSTING:
+		/* Set the destination state to AWAIT - this signals the
+		 * entry into the SCI_RNC_READY state that a suspension
+		 * needs to be done immediately.
+		 */
+		if (sci_rnc->destination_state != RNC_DEST_FINAL)
+			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
+		sci_rnc->suspend_type = suspend_type;
+		sci_rnc->suspend_reason = suspend_reason;
+		return SCI_SUCCESS;
+
+	case SCI_RNC_TX_SUSPENDED:
+		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
+			status = SCI_SUCCESS;
+		break;
+	case SCI_RNC_TX_RX_SUSPENDED:
+		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+			status = SCI_SUCCESS;
+		break;
+	case SCI_RNC_AWAIT_SUSPENSION:
+		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+		    || (suspend_type == sci_rnc->suspend_type))
+			return SCI_SUCCESS;
+		break;
+	default:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+	sci_rnc->destination_state = dest_param;
+	sci_rnc->suspend_type = suspend_type;
+	sci_rnc->suspend_reason = suspend_reason;
+
+	if (status == SCI_SUCCESS) { /* Already in the destination state? */
+		struct isci_host *ihost = idev->owning_port->owning_controller;
+
+		wake_up_all(&ihost->eventq); /* Let observers look. */
+		return SCI_SUCCESS;
+	}
+	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
+	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
+
+		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
+			isci_dev_set_hang_detection_timeout(idev, 0x00000001);
+
+		sci_remote_device_post_request(
+			idev, SCI_SOFTWARE_SUSPEND_CMD);
+	}
+	if (state != SCI_RNC_AWAIT_SUSPENSION)
+		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+						    scics_sds_remote_node_context_callback cb_fn,
+						    void *cb_p)
+{
+	enum scis_sds_remote_node_context_states state;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+
+	state = sci_rnc->sm.current_state_id;
+	dev_dbg(scirdev_to_dev(idev),
+		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
+			"dev resume path %s\n",
+		__func__, rnc_state_name(state), cb_fn, cb_p,
+		sci_rnc->destination_state,
+		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
+			? "<abort active>" : "<normal>");
+
+	switch (state) {
+	case SCI_RNC_INITIAL:
+		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+			return SCI_FAILURE_INVALID_STATE;
+
+		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
+							RNC_DEST_READY);
+		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+			sci_remote_node_context_construct_buffer(sci_rnc);
+			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+		}
+		return SCI_SUCCESS;
+
+	case SCI_RNC_POSTING:
+	case SCI_RNC_INVALIDATING:
+	case SCI_RNC_RESUMING:
+		/* We are still waiting to post when a resume was
+		 * requested.
+		 */
+		switch (sci_rnc->destination_state) {
+		case RNC_DEST_SUSPENDED:
+		case RNC_DEST_SUSPENDED_RESUME:
+			/* Previously waiting to suspend after posting.
+			 * Now continue onto resumption.
+			 */
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p,
+				RNC_DEST_SUSPENDED_RESUME);
+			break;
+		default:
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p,
+				RNC_DEST_READY);
+			break;
+		}
+		return SCI_SUCCESS;
+
+	case SCI_RNC_TX_SUSPENDED:
+	case SCI_RNC_TX_RX_SUSPENDED:
+		{
+			struct domain_device *dev = idev->domain_dev;
+			/* If this is an expander attached SATA device we must
+			 * invalidate and repost the RNC since this is the only
+			 * way to clear the TCi to NCQ tag mapping table for
+			 * the RNi. All other device types we can just resume.
+			 */
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
+
+			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+				if ((dev_is_sata(dev) && dev->parent) ||
+				    (sci_rnc->destination_state == RNC_DEST_FINAL))
+					sci_change_state(&sci_rnc->sm,
+							 SCI_RNC_INVALIDATING);
+				else
+					sci_change_state(&sci_rnc->sm,
+							 SCI_RNC_RESUMING);
+			}
+		}
+		return SCI_SUCCESS;
+
+	case SCI_RNC_AWAIT_SUSPENSION:
+		sci_remote_node_context_setup_to_resume(
+			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+							     struct isci_request *ireq)
+{
+	enum scis_sds_remote_node_context_states state;
+
+	state = sci_rnc->sm.current_state_id;
+
+	switch (state) {
+	case SCI_RNC_READY:
+		return SCI_SUCCESS;
+	case SCI_RNC_TX_SUSPENDED:
+	case SCI_RNC_TX_RX_SUSPENDED:
+	case SCI_RNC_AWAIT_SUSPENSION:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
+		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+	default:
+		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			"%s: invalid state %s\n", __func__,
+			rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+enum sci_status sci_remote_node_context_start_task(
+	struct sci_remote_node_context *sci_rnc,
+	struct isci_request *ireq,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p)
+{
+	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
+								cb_fn, cb_p);
+	if (status != SCI_SUCCESS)
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			"%s: resume failed: %d\n", __func__, status);
+	return status;
+}
+
+int sci_remote_node_context_is_safe_to_abort(
+	struct sci_remote_node_context *sci_rnc)
+{
+	enum scis_sds_remote_node_context_states state;
+
+	state = sci_rnc->sm.current_state_id;
+	switch (state) {
+	case SCI_RNC_INVALIDATING:
+	case SCI_RNC_TX_RX_SUSPENDED:
+		return 1;
+	case SCI_RNC_POSTING:
+	case SCI_RNC_RESUMING:
+	case SCI_RNC_READY:
+	case SCI_RNC_TX_SUSPENDED:
+	case SCI_RNC_AWAIT_SUSPENSION:
+	case SCI_RNC_INITIAL:
+		return 0;
+	default:
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			 "%s: invalid state %d\n", __func__, state);
+		return 0;
+	}
+}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 0000000..c7ee81d
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,236 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes used to
+ * model and manage the remote node context in the silicon.
+ */
+
+#include "isci.h"
+
+/**
+ * This constant represents an invalid remote device id; it is used to
+ * program the STPDARNI register so the driver knows when it has received a
+ * SIGNATURE FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX    0x0FFF
+
+enum sci_remote_node_suspension_reasons {
+	SCI_HW_SUSPEND,
+	SCI_SW_SUSPEND_NORMAL,
+	SCI_SW_SUSPEND_LINKHANG_DETECT
+};
+#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
+#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * enum sci_remote_node_context_states
+ * @SCI_RNC_INITIAL: initial state for a remote node context.  On a resume
+ * request the remote node context will transition to the posting state.
+ *
+ * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
+ * the RNC is posted the remote node context will be made ready.
+ *
+ * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
+ * the hardware.  Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ *
+ * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
+ * hardware.  Once the event notification of resume complete is received the
+ * remote node context will transition to the ready state.
+ *
+ * @SCI_RNC_READY: state that the remote node context must be in to accept io
+ * request operations.
+ *
+ * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
+ * it gets a TX suspend notification from the hardware.
+ *
+ * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
+ * when it gets a TX RX suspend notification from the hardware.
+ *
+ * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
+ * for a suspend notification from the hardware.  This state is entered when
+ * either there is a request to suspend the remote node context or when there
+ * is a TC completion where the remote node will be suspended by the hardware.
+ */
+#define RNC_STATES {\
+	C(RNC_INITIAL),\
+	C(RNC_POSTING),\
+	C(RNC_INVALIDATING),\
+	C(RNC_RESUMING),\
+	C(RNC_READY),\
+	C(RNC_TX_SUSPENDED),\
+	C(RNC_TX_RX_SUSPENDED),\
+	C(RNC_AWAIT_SUSPENSION),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum scis_sds_remote_node_context_states RNC_STATES;
+#undef C
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+	RNC_DEST_UNSPECIFIED,
+	RNC_DEST_READY,
+	RNC_DEST_FINAL,
+	RNC_DEST_SUSPENDED,       /* Set when suspend during post/invalidate */
+	RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
+				   * or invalidating and already suspending.
+				   */
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ *    associated with the remote node context object.  The remote node context
+ *    (RNC) object models the remote device information necessary to manage
+ *    the silicon RNC.
+ */
+struct sci_remote_node_context {
+	/**
+	 * This field indicates the remote node index (RNI) associated with
+	 * this RNC.
+	 */
+	u16 remote_node_index;
+
+	/**
+	 * This field is the recorded suspension type of the remote node
+	 * context suspension.
+	 */
+	u32 suspend_type;
+	enum sci_remote_node_suspension_reasons suspend_reason;
+	u32 suspend_count;
+
+	/**
+	 * This field records the destination state for the remote node
+	 * context, i.e. the state it should reach once the current
+	 * transition completes.  This can cause an automatic resume on
+	 * receiving a suspension notification.
+	 */
+	enum sci_remote_node_context_destination_state destination_state;
+
+	/**
+	 * This field contains the callback function that the user requested to be
+	 * called when the requested state transition is complete.
+	 */
+	scics_sds_remote_node_context_callback user_callback;
+
+	/**
+	 * This field contains the parameter that is called when the user requested
+	 * state transition is completed.
+	 */
+	void *user_cookie;
+
+	/**
+	 * This field contains the data for the object's state machine.
+	 */
+	struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+					    u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+	struct sci_remote_node_context *sci_rnc);
+
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+							   u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+						      scics_sds_remote_node_context_callback callback,
+						      void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+						     enum sci_remote_node_suspension_reasons reason,
+						     u32 suspension_code);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+						    scics_sds_remote_node_context_callback cb_fn,
+						    void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+						   struct isci_request *ireq,
+						   scics_sds_remote_node_context_callback cb_fn,
+						   void *cb_p);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+						      struct isci_request *ireq);
+int sci_remote_node_context_is_safe_to_abort(
+	struct sci_remote_node_context *sci_rnc);
+
+static inline bool sci_remote_node_context_is_being_destroyed(
+	struct sci_remote_node_context *sci_rnc)
+{
+	return (sci_rnc->destination_state == RNC_DEST_FINAL)
+		|| ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
+		    && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
+}
+#endif  /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 0000000..301b314
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ *    public, protected, and private methods.
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ * sci_remote_node_table_get_group_index() - find an available group
+ * @remote_node_table: This is the remote node index table from which the
+ *    selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ *    search for an available selection.
+ *
+ * This routine finds the absolute bit position of the first available group
+ * in the selected group table, scanning each 32-bit word in turn.
+ *
+ * Return: the absolute bit position for an available group, or
+ * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
+ */
+static u32 sci_remote_node_table_get_group_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_table_index)
+{
+	u32 dword_index;
+	u32 *group_table;
+	u32 bit_index;
+
+	group_table = remote_node_table->remote_node_groups[group_table_index];
+
+	for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+		if (group_table[dword_index] != 0) {
+			for (bit_index = 0; bit_index < 32; bit_index++) {
+				if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+					return (dword_index * 32) + bit_index;
+				}
+			}
+		}
+	}
+
+	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
+
+/**
+ * sci_remote_node_table_clear_group_index() - clear a group table bit
+ * @remote_node_table: This is the remote node table in which to clear the
+ *    selector.
+ * @group_table_index: This is the remote node selector in which the change
+ *    will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method clears the group index entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_clear_group_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_table_index,
+	u32 group_index)
+{
+	u32 dword_index;
+	u32 bit_index;
+	u32 *group_table;
+
+	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+	dword_index = group_index / 32;
+	bit_index   = group_index % 32;
+	group_table = remote_node_table->remote_node_groups[group_table_index];
+
+	group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ * sci_remote_node_table_set_group_index() - set a group table bit
+ * @remote_node_table: This is the remote node table in which to set the
+ *    selector.
+ * @group_table_index: This is the remote node selector in which the change
+ *    will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method sets the group index bit entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_set_group_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_table_index,
+	u32 group_index)
+{
+	u32 dword_index;
+	u32 bit_index;
+	u32 *group_table;
+
+	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+	dword_index = group_index / 32;
+	bit_index   = group_index % 32;
+	group_table = remote_node_table->remote_node_groups[group_table_index];
+
+	group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ * sci_remote_node_table_set_node_index() - mark a remote node available
+ * @remote_node_table: This is the remote node table in which to modify
+ *    the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ *    the table.
+ *
+ * This method marks the remote node as available in the remote node
+ * allocation table.
+ */
+static void sci_remote_node_table_set_node_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_index)
+{
+	u32 dword_location;
+	u32 dword_remainder;
+	u32 slot_normalized;
+	u32 slot_position;
+
+	BUG_ON(
+		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+		);
+
+	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+	remote_node_table->available_remote_nodes[dword_location] |=
+		1 << (slot_normalized + slot_position);
+}
+
+/**
+ * sci_remote_node_table_clear_node_index() - mark a remote node allocated
+ * @remote_node_table: This is the remote node table from which to clear
+ *    the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ *    from the table.
+ *
+ * This method clears the remote node index from the table of available
+ * remote nodes.
+ */
+static void sci_remote_node_table_clear_node_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_index)
+{
+	u32 dword_location;
+	u32 dword_remainder;
+	u32 slot_position;
+	u32 slot_normalized;
+
+	BUG_ON(
+		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+		);
+
+	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+	remote_node_table->available_remote_nodes[dword_location] &=
+		~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ * sci_remote_node_table_clear_group() - clear an entire group slot
+ * @remote_node_table: The remote node table from which the slot will be
+ *    cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index.
+ */
+static void sci_remote_node_table_clear_group(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_index)
+{
+	u32 dword_location;
+	u32 dword_remainder;
+	u32 dword_value;
+
+	BUG_ON(
+		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+		);
+
+	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+	dword_value = remote_node_table->available_remote_nodes[dword_location];
+	dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+	remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ * sci_remote_node_table_set_group() - set an entire group slot
+ * @remote_node_table: The remote node table in which the group will be set.
+ * @group_index: The index for the slot that is to be set.
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_index)
+{
+	u32 dword_location;
+	u32 dword_remainder;
+	u32 dword_value;
+
+	BUG_ON(
+		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+		);
+
+	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+	dword_value = remote_node_table->available_remote_nodes[dword_location];
+	dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+	remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ * sci_remote_node_table_get_group_value() - read a group's availability bits
+ * @remote_node_table: This is the remote node table for which the group
+ *    value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * Return: the bit values at the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_index)
+{
+	u32 dword_location;
+	u32 dword_remainder;
+	u32 dword_value;
+
+	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+	dword_value = remote_node_table->available_remote_nodes[dword_location];
+	dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+	dword_value = dword_value >> (dword_remainder * 4);
+
+	return (u8)dword_value;
+}
+
+/**
+ * sci_remote_node_table_initialize() - initialize the remote node table
+ * @remote_node_table: The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method initializes the remote node table for use.
+ */
+void sci_remote_node_table_initialize(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_entries)
+{
+	u32 index;
+
+	/*
+	 * Initialize the raw data.  We could improve the speed by only
+	 * initializing those entries that are actually going to be used.
+	 */
+	memset(
+		remote_node_table->available_remote_nodes,
+		0x00,
+		sizeof(remote_node_table->available_remote_nodes)
+		);
+
+	memset(
+		remote_node_table->remote_node_groups,
+		0x00,
+		sizeof(remote_node_table->remote_node_groups)
+		);
+
+	/* Initialize the available remote node sets */
+	remote_node_table->available_nodes_array_size = (u16)
+							(remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+							+ ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+	/* Initialize each full DWORD to a FULL SET of remote nodes */
+	for (index = 0; index < remote_node_entries; index++) {
+		sci_remote_node_table_set_node_index(remote_node_table, index);
+	}
+
+	remote_node_table->group_array_size = (u16)
+					      (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+					      + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+	for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+		/*
+		 * These are all guaranteed to be full slot values, so fill
+		 * them in as available sets of 3 remote nodes.
+		 */
+		sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+	}
+
+	/* Now fill in any remainders that we may find */
+	if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+		sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+	} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+		sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+	}
+}
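+
+/*
+ * For example, initializing with remote_node_entries == 32 gives
+ * available_nodes_array_size == 32 / 24 + 1 == 2 dwords and
+ * group_array_size == 32 / 96 + 1 == 1 dword.  The loop marks groups 0-9
+ * (30 nodes) in the triple-slot selector, and the remainder of 32 % 3 == 2
+ * nodes lands group 10 in the dual-slot selector.
+ */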
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which to allocate a
+ *    remote node.
+ * @group_table_index: The group table index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table.  The
+ * group table index determines which remote node group table to search.
+ * This search may fail, in which case another group table can be specified.
+ * The function is designed to allow a search from the available single remote
+ * node group up to the triple remote node group.  If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated.  Returns the RNi value, or an invalid remote node context index if
+ * no RNi can be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_table_index)
+{
+	u8 index;
+	u8 group_value;
+	u32 group_index;
+	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+	group_index = sci_remote_node_table_get_group_index(
+		remote_node_table, group_table_index);
+
+	/* Proceed only if an available group was found in the selected table */
+	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+		group_value = sci_remote_node_table_get_group_value(
+			remote_node_table, group_index);
+
+		for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+			if (((1 << index) & group_value) != 0) {
+				/* We have selected a bit now clear it */
+				remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+							  + index);
+
+				sci_remote_node_table_clear_group_index(
+					remote_node_table, group_table_index, group_index
+					);
+
+				sci_remote_node_table_clear_node_index(
+					remote_node_table, remote_node_index
+					);
+
+				if (group_table_index > 0) {
+					sci_remote_node_table_set_group_index(
+						remote_node_table, group_table_index - 1, group_index
+						);
+				}
+
+				break;
+			}
+		}
+	}
+
+	return remote_node_index;
+}
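+
+/*
+ * To illustrate the flow above: if the selected group table yields
+ * group_index 4 with group_value 0x2, only bit 1 is free, so the returned
+ * RNi is 4 * SCU_STP_REMOTE_NODE_COUNT + 1 == 13.  The group is removed
+ * from the searched selector and, when group_table_index > 0, re-inserted
+ * one selector lower since it now holds one fewer free node.
+ */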
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ *    remote node entries.
+ * @group_table_index: This is the group table index which must equal two (2)
+ *    for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * Returns the remote node index that represents three consecutive remote node
+ * entries, or an invalid remote node context index if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u32 group_table_index)
+{
+	u32 group_index;
+	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+	group_index = sci_remote_node_table_get_group_index(
+		remote_node_table, group_table_index);
+
+	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+		remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+		sci_remote_node_table_clear_group_index(
+			remote_node_table, group_table_index, group_index
+			);
+
+		sci_remote_node_table_clear_group(
+			remote_node_table, group_index
+			);
+	}
+
+	return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ *    allocation is to take place.
+ * @remote_node_count: This is the remote node count, which is one of
+ *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller.  Valid values for the remote node count are
+ * SCU_SSP_REMOTE_NODE_COUNT(1) and SCU_STP_REMOTE_NODE_COUNT(3).  Returns
+ * the allocated remote node index, or an invalid remote node context index
+ * on failure.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_count)
+{
+	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+		remote_node_index =
+			sci_remote_node_table_allocate_single_remote_node(
+				remote_node_table, 0);
+
+		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+			remote_node_index =
+				sci_remote_node_table_allocate_single_remote_node(
+					remote_node_table, 1);
+		}
+
+		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+			remote_node_index =
+				sci_remote_node_table_allocate_single_remote_node(
+					remote_node_table, 2);
+		}
+	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+		remote_node_index =
+			sci_remote_node_table_allocate_triple_remote_node(
+				remote_node_table, 2);
+	}
+
+	return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index
+ *    is to be freed.
+ * @remote_node_index: The remote node index that is to be freed.
+ *
+ * This method will free a single remote node index back to the remote node
+ * table.  This routine will update the remote node groups.
+ */
+static void sci_remote_node_table_release_single_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u16 remote_node_index)
+{
+	u32 group_index;
+	u8 group_value;
+
+	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+	group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+	/*
+	 * Assert that we are not trying to add an entry to a slot that is already
+	 * full. */
+	BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+	if (group_value == 0x00) {
+		/*
+		 * There are no entries in this slot so it must be added to the single
+		 * slot table. */
+		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+	} else if ((group_value & (group_value - 1)) == 0) {
+		/*
+		 * There is only one entry in this slot so it must be moved from the
+		 * single slot table to the dual slot table */
+		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+	} else {
+		/*
+		 * There are two entries in the slot so it must be moved from the dual
+		 * slot table to the triple slot table. */
+		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+	}
+
+	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ *    index is to be freed.
+ * @remote_node_index: The first remote node index of the group of three that
+ *    is to be freed.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u16 remote_node_index)
+{
+	u32 group_index;
+
+	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+	sci_remote_node_table_set_group_index(
+		remote_node_table, 2, group_index
+		);
+
+	sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ *    to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ *    to be freed.
+ * @remote_node_index: The first remote node index that is to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_count,
+	u16 remote_node_index)
+{
+	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+		sci_remote_node_table_release_single_remote_node(
+			remote_node_table, remote_node_index);
+	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+		sci_remote_node_table_release_triple_remote_node(
+			remote_node_table, remote_node_index);
+	}
+}
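+
+/*
+ * A minimal usage sketch for the allocate/release pair above, assuming
+ * 'rnt' points at the host's remote node table and 'count' is the
+ * SCU_*_REMOTE_NODE_COUNT value for the device type:
+ *
+ *	u16 rni = sci_remote_node_table_allocate_remote_node(rnt, count);
+ *
+ *	if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ *		... use the remote node context(s) at rni ...
+ *		sci_remote_node_table_release_remote_node_index(rnt, count, rni);
+ *	}
+ *
+ * The same count must be passed to both calls so the group selectors stay
+ * consistent.
+ */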
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 0000000..721ab98
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indices in the remote node table.
+ * The SCU hardware requires that STP remote node entries occupy three
+ * consecutive remote node indices, so the table is arranged in sets of three.
+ * The bits are used as 0111 0111 to make a byte and the bits define the set
+ * of three remote nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs. The lowest ordered bits are the
+ * ones used in case the full DWORD is not being used, i.e. 0000 0000 0000
+ * 0000 0111 0111 0111 0111 if only a single WORD is in use in the DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+	(sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE	\
+	(SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD	\
+	(sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP   4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX      (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE    (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE   (0x00)
+
+/**
+ *
+ *
+ * Expander attached SATA remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT        3
+
+/**
+ *
+ *
+ * Expander or direct attached SSP remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT        1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT       1
+
+/**
+ * struct sci_remote_node_table - tracks available remote node context indices
+ *
+ *
+ */
+struct sci_remote_node_table {
+	/**
+	 * This field contains the array size in dwords
+	 */
+	u16 available_nodes_array_size;
+
+	/**
+	 * This field contains the array size of the remote node groups table.
+	 */
+	u16 group_array_size;
+
+	/**
+	 * This field is the array of available remote node entries in bits.
+	 * Because of the way STP remote node data is allocated on the SCU hardware
+	 * the remote nodes must occupy three consecutive remote node context
+	 * entries.  For ease of allocation and de-allocation we have broken the
+	 * sets of three into a single nibble.  When the STP RNi is allocated all
+	 * of the bits in the nibble are cleared.  This math results in a table size
+	 * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+	 */
+	u32 available_remote_nodes[
+		(SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+		+ ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+	/**
+	 * This field is the nibble selector for the above table.  There are three
+	 * possible selectors each for fast lookup when trying to find one, two or
+	 * three remote node entries.
+	 */
+	u32 remote_node_groups[
+		SCU_STP_REMOTE_NODE_COUNT][
+		(SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+		+ ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
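+
+/*
+ * Sizing sketch, assuming SCI_MAX_REMOTE_DEVICES == 256: available_remote_nodes
+ * spans 256 / 24 + 1 == 11 dwords, and each of the three remote_node_groups
+ * selectors spans 256 / 96 + 1 == 3 dwords.
+ */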
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+	struct sci_remote_node_table *remote_node_table,
+	u32 remote_node_count,
+	u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 0000000..cfd0084
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3528 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <scsi/scsi_cmnd.h>
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+#undef C
+#define C(a) (#a)
+const char *req_state_name(enum sci_base_request_states state)
+{
+	static const char * const strings[] = REQUEST_STATES;
+
+	return strings[state];
+}
+#undef C
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+							int idx)
+{
+	if (idx == 0)
+		return &ireq->tc->sgl_pair_ab;
+	else if (idx == 1)
+		return &ireq->tc->sgl_pair_cd;
+	else if (idx < 0)
+		return NULL;
+	else
+		return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+					  struct isci_request *ireq, u32 idx)
+{
+	u32 offset;
+
+	if (idx == 0) {
+		offset = (void *) &ireq->tc->sgl_pair_ab -
+			 (void *) &ihost->task_context_table[0];
+		return ihost->tc_dma + offset;
+	} else if (idx == 1) {
+		offset = (void *) &ireq->tc->sgl_pair_cd -
+			 (void *) &ihost->task_context_table[0];
+		return ihost->tc_dma + offset;
+	}
+
+	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+	e->length = sg_dma_len(sg);
+	e->address_upper = upper_32_bits(sg_dma_address(sg));
+	e->address_lower = lower_32_bits(sg_dma_address(sg));
+	e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+	struct isci_host *ihost = ireq->isci_host;
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct scatterlist *sg = NULL;
+	dma_addr_t dma_addr;
+	u32 sg_idx = 0;
+	struct scu_sgl_element_pair *scu_sg   = NULL;
+	struct scu_sgl_element_pair *prev_sg  = NULL;
+
+	if (task->num_scatter > 0) {
+		sg = task->scatter;
+
+		while (sg) {
+			scu_sg = to_sgl_element_pair(ireq, sg_idx);
+			init_sgl_element(&scu_sg->A, sg);
+			sg = sg_next(sg);
+			if (sg) {
+				init_sgl_element(&scu_sg->B, sg);
+				sg = sg_next(sg);
+			} else
+				memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+			if (prev_sg) {
+				dma_addr = to_sgl_element_pair_dma(ihost,
+								   ireq,
+								   sg_idx);
+
+				prev_sg->next_pair_upper =
+					upper_32_bits(dma_addr);
+				prev_sg->next_pair_lower =
+					lower_32_bits(dma_addr);
+			}
+
+			prev_sg = scu_sg;
+			sg_idx++;
+		}
+	} else {	/* handle when no sg */
+		scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+		dma_addr = dma_map_single(&ihost->pdev->dev,
+					  task->scatter,
+					  task->total_xfer_len,
+					  task->data_dir);
+
+		ireq->zero_scatter_daddr = dma_addr;
+
+		scu_sg->A.length = task->total_xfer_len;
+		scu_sg->A.address_upper = upper_32_bits(dma_addr);
+		scu_sg->A.address_lower = lower_32_bits(dma_addr);
+	}
+
+	if (scu_sg) {
+		scu_sg->next_pair_upper = 0;
+		scu_sg->next_pair_lower = 0;
+	}
+}
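+
+/*
+ * To illustrate the pair chaining above: a five-entry scatterlist lands as
+ * sg0/sg1 in tc->sgl_pair_ab, sg2/sg3 in tc->sgl_pair_cd, and sg4 in
+ * sg_table[0] with a zeroed B element.  Each pair's next_pair_{upper,lower}
+ * holds the bus address of the following pair, and the final pair's next
+ * pointer is zeroed.
+ */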
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+	struct ssp_cmd_iu *cmd_iu;
+	struct sas_task *task = isci_request_access_task(ireq);
+
+	cmd_iu = &ireq->ssp.cmd;
+
+	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+	cmd_iu->add_cdb_len = 0;
+	cmd_iu->_r_a = 0;
+	cmd_iu->_r_b = 0;
+	cmd_iu->en_fburst = 0; /* unsupported */
+	cmd_iu->task_prio = task->ssp_task.task_prio;
+	cmd_iu->task_attr = task->ssp_task.task_attr;
+	cmd_iu->_r_c = 0;
+
+	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
+		       (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
+}
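+
+/*
+ * The CDB copy above rounds the CDB length up to whole dwords, e.g. a
+ * 10-byte CDB copies (10 + 3) / 4 == 3 dwords, byte-swapping each dword on
+ * the way into the command IU.
+ */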
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+	struct ssp_task_iu *task_iu;
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+	task_iu = &ireq->ssp.tmf;
+
+	memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+	task_iu->task_func = isci_tmf->tmf_code;
+	task_iu->task_tag =
+		(test_bit(IREQ_TMF, &ireq->flags)) ?
+		isci_tmf->io_tag :
+		SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @ireq: The IO request whose task context is to be constructed.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ *    constructed.
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+	struct isci_request *ireq,
+	struct scu_task_context *task_context)
+{
+	dma_addr_t dma_addr;
+	struct isci_remote_device *idev;
+	struct isci_port *iport;
+
+	idev = ireq->target_device;
+	iport = idev->owning_port;
+
+	/* Fill in the TC with its required data */
+	task_context->abort = 0;
+	task_context->priority = 0;
+	task_context->initiator_request = 1;
+	task_context->connection_rate = idev->connection_rate;
+	task_context->protocol_engine_index = ISCI_PEG;
+	task_context->logical_port_index = iport->physical_port_index;
+	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+	task_context->valid = SCU_TASK_CONTEXT_VALID;
+	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+	task_context->remote_node_index = idev->rnc.remote_node_index;
+	task_context->command_code = 0;
+
+	task_context->link_layer_control = 0;
+	task_context->do_not_dma_ssp_good_response = 1;
+	task_context->strict_ordering = 0;
+	task_context->control_frame = 0;
+	task_context->timeout_enable = 0;
+	task_context->block_guard_enable = 0;
+
+	task_context->address_modifier = 0;
+
+	/* task_context->type.ssp.tag = ireq->io_tag; */
+	task_context->task_phase = 0x01;
+
+	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+			      (iport->physical_port_index <<
+			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+			      ISCI_TAG_TCI(ireq->io_tag));
+
+	/*
+	 * Copy the physical address for the command buffer to the
+	 * SCU Task Context
+	 */
+	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+	task_context->command_iu_upper = upper_32_bits(dma_addr);
+	task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+	/*
+	 * Copy the physical address for the response buffer to the
+	 * SCU Task Context
+	 */
+	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+	task_context->response_iu_upper = upper_32_bits(dma_addr);
+	task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+static u8 scu_bg_blk_size(struct scsi_device *sdp)
+{
+	switch (sdp->sector_size) {
+	case 512:
+		return 0;
+	case 1024:
+		return 1;
+	case 4096:
+		return 3;
+	default:
+		return 0xff;
+	}
+}
+
+static u32 scu_dif_bytes(u32 len, u32 sector_size)
+{
+	return (len >> ilog2(sector_size)) * 8;
+}
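+
+/*
+ * scu_dif_bytes() accounts for 8 bytes of DIF data per sector, e.g. a
+ * 64 KiB transfer at a 512-byte sector size adds (65536 >> 9) * 8 == 1024
+ * bytes of protection data to the transfer length.
+ */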
+
+static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
+{
+	struct scu_task_context *tc = ireq->tc;
+	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+	u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+	tc->block_guard_enable = 1;
+	tc->blk_prot_en = 1;
+	tc->blk_sz = blk_sz;
+	/* DIF write insert */
+	tc->blk_prot_func = 0x2;
+
+	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+						   scmd->device->sector_size);
+
+	/* always init to 0, used by hw */
+	tc->interm_crc_val = 0;
+
+	tc->init_crc_seed = 0;
+	tc->app_tag_verify = 0;
+	tc->app_tag_gen = 0;
+	tc->ref_tag_seed_verify = 0;
+
+	/* always init to same as bg_blk_sz */
+	tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+	tc->reserved_DC_0 = 0;
+
+	/* always init to 8 */
+	tc->DIF_bytes_immed_val = 8;
+
+	tc->reserved_DC_1 = 0;
+	tc->bgc_blk_sz = scmd->device->sector_size;
+	tc->reserved_E0_0 = 0;
+	tc->app_tag_gen_mask = 0;
+
+	/* setup block guard control */
+	tc->bgctl = 0;
+
+	/* DIF write insert */
+	tc->bgctl_f.op = 0x2;
+
+	tc->app_tag_verify_mask = 0;
+
+	/* must init to 0 for hw */
+	tc->blk_guard_err = 0;
+
+	tc->reserved_E8_0 = 0;
+
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+	else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->ref_tag_seed_gen = 0;
+}
+
+static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
+{
+	struct scu_task_context *tc = ireq->tc;
+	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+	u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+	tc->block_guard_enable = 1;
+	tc->blk_prot_en = 1;
+	tc->blk_sz = blk_sz;
+	/* DIF read strip */
+	tc->blk_prot_func = 0x1;
+
+	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+						   scmd->device->sector_size);
+
+	/* always init to 0, used by hw */
+	tc->interm_crc_val = 0;
+
+	tc->init_crc_seed = 0;
+	tc->app_tag_verify = 0;
+	tc->app_tag_gen = 0;
+
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+	else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->ref_tag_seed_verify = 0;
+
+	/* always init to same as bg_blk_sz */
+	tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+	tc->reserved_DC_0 = 0;
+
+	/* always init to 8 */
+	tc->DIF_bytes_immed_val = 8;
+
+	tc->reserved_DC_1 = 0;
+	tc->bgc_blk_sz = scmd->device->sector_size;
+	tc->reserved_E0_0 = 0;
+	tc->app_tag_gen_mask = 0;
+
+	/* setup block guard control */
+	tc->bgctl = 0;
+
+	/* DIF read strip */
+	tc->bgctl_f.crc_verify = 1;
+	tc->bgctl_f.op = 0x1;
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
+		tc->bgctl_f.ref_tag_chk = 1;
+		tc->bgctl_f.app_f_detect = 1;
+	} else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->bgctl_f.app_ref_f_detect = 1;
+
+	tc->app_tag_verify_mask = 0;
+
+	/* must init to 0 for hw */
+	tc->blk_guard_err = 0;
+
+	tc->reserved_E8_0 = 0;
+	tc->ref_tag_seed_gen = 0;
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @ireq: The IO request whose task context is to be constructed.
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+						      enum dma_data_direction dir,
+						      u32 len)
+{
+	struct scu_task_context *task_context = ireq->tc;
+	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
+	struct scsi_cmnd *scmd = sas_task->uldd_task;
+	u8 prot_type = scsi_get_prot_type(scmd);
+	u8 prot_op = scsi_get_prot_op(scmd);
+
+	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+	task_context->ssp_command_iu_length =
+		sizeof(struct ssp_cmd_iu) / sizeof(u32);
+	task_context->type.ssp.frame_type = SSP_COMMAND;
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+	case DMA_NONE:
+	default:
+		task_context->task_type = SCU_TASK_TYPE_IOREAD;
+		break;
+	case DMA_TO_DEVICE:
+		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+		break;
+	}
+
+	task_context->transfer_length_bytes = len;
+
+	if (task_context->transfer_length_bytes > 0)
+		sci_request_build_sgl(ireq);
+
+	if (prot_type != SCSI_PROT_DIF_TYPE0) {
+		if (prot_op == SCSI_PROT_READ_STRIP)
+			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
+		else if (prot_op == SCSI_PROT_WRITE_INSERT)
+			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
+	}
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request.  The
+ *    following important settings are utilized: -# priority ==
+ *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
+ *    ahead of other tasks destined for the same Remote Node. -# task_type ==
+ *    SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw task frame (matching
+ *    the type set below) is being utilized to perform task management. -#
+ *    control_frame == 1.  This ensures that the proper endianness is set so
+ *    that the bytes are transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being
+ *    constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+	struct scu_task_context *task_context = ireq->tc;
+
+	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+	task_context->control_frame                = 1;
+	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
+	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
+	task_context->transfer_length_bytes        = 0;
+	task_context->type.ssp.frame_type          = SSP_TASK;
+	task_context->ssp_command_iu_length =
+		sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ *    request.  This is called from the various SATA constructors.
+ * @ireq: The general IO request object which is to be used in
+ *    constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ *    constructed.
+ *
+ * On return, the general IO request construction and the buffer assignment
+ * for the command buffer are complete.  TODO: Revisit task context
+ * construction to determine what is common for SSP/SMP/STP task context
+ * structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+	struct isci_request *ireq,
+	struct scu_task_context *task_context)
+{
+	dma_addr_t dma_addr;
+	struct isci_remote_device *idev;
+	struct isci_port *iport;
+
+	idev = ireq->target_device;
+	iport = idev->owning_port;
+
+	/* Fill in the TC with its required data */
+	task_context->abort = 0;
+	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+	task_context->initiator_request = 1;
+	task_context->connection_rate = idev->connection_rate;
+	task_context->protocol_engine_index = ISCI_PEG;
+	task_context->logical_port_index = iport->physical_port_index;
+	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+	task_context->valid = SCU_TASK_CONTEXT_VALID;
+	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+	task_context->remote_node_index = idev->rnc.remote_node_index;
+	task_context->command_code = 0;
+
+	task_context->link_layer_control = 0;
+	task_context->do_not_dma_ssp_good_response = 1;
+	task_context->strict_ordering = 0;
+	task_context->control_frame = 0;
+	task_context->timeout_enable = 0;
+	task_context->block_guard_enable = 0;
+
+	task_context->address_modifier = 0;
+	task_context->task_phase = 0x01;
+
+	task_context->ssp_command_iu_length =
+		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+	/* Set the first word of the H2D REG FIS */
+	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+			      (iport->physical_port_index <<
+			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+			      ISCI_TAG_TCI(ireq->io_tag));
+	/*
+	 * Copy the physical address for the command buffer to the SCU Task
+	 * Context. We must offset the command buffer by 4 bytes because the
+	 * first 4 bytes are transferred in the body of the TC.
+	 */
+	dma_addr = sci_io_request_get_dma_addr(ireq,
+						((char *) &ireq->stp.cmd) +
+						sizeof(u32));
+
+	task_context->command_iu_upper = upper_32_bits(dma_addr);
+	task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+	/* SATA Requests do not have a response buffer */
+	task_context->response_iu_upper = 0;
+	task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+	struct scu_task_context *task_context = ireq->tc;
+
+	scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+	task_context->control_frame         = 0;
+	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
+	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
+	task_context->type.stp.fis_type     = FIS_REGH2D;
+	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+							  bool copy_rx_frame)
+{
+	struct isci_stp_request *stp_req = &ireq->stp.req;
+
+	scu_stp_raw_request_construct_task_context(ireq);
+
+	stp_req->status = 0;
+	stp_req->sgl.offset = 0;
+	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+	if (copy_rx_frame) {
+		sci_request_build_sgl(ireq);
+		stp_req->sgl.index = 0;
+	} else {
+		/* The user does not want the data copied to the SGL buffer location */
+		stp_req->sgl.index = -1;
+	}
+
+	return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @ireq: This parameter specifies the request to be constructed as an
+ *    optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
+ *    value of 1 indicates NCQ.
+ * @len: This parameter specifies the length of the data transfer in bytes.
+ * @dir: This parameter specifies the direction of the data transfer.
+ *
+ * This method will perform request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+						     u8 optimized_task_type,
+						     u32 len,
+						     enum dma_data_direction dir)
+{
+	struct scu_task_context *task_context = ireq->tc;
+
+	/* Build the STP task context structure */
+	scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+	/* Copy over the SGL elements */
+	sci_request_build_sgl(ireq);
+
+	/* Copy over the number of bytes to be transferred */
+	task_context->transfer_length_bytes = len;
+
+	if (dir == DMA_TO_DEVICE) {
+		/*
+		 * The difference between the DMA IN and DMA OUT request task type
+		 * values are consistent with the difference between FPDMA READ
+		 * and FPDMA WRITE values.  Add the supplied task type parameter
+		 * to this difference to set the task type properly for this
+		 * DATA OUT (WRITE) case. */
+		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+								 - SCU_TASK_TYPE_DMA_IN);
+	} else {
+		/*
+		 * For the DATA IN (READ) case, simply save the supplied
+		 * optimized task type. */
+		task_context->task_type = optimized_task_type;
+	}
+}
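+
+/*
+ * Per the comment above, the write-direction task type is derived
+ * arithmetically; e.g. for an NCQ write,
+ * SCU_TASK_TYPE_FPDMAQ_READ + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN)
+ * yields the FPDMA write task type, mirroring the DMA_IN/DMA_OUT spacing.
+ */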
+
+static void sci_atapi_construct(struct isci_request *ireq)
+{
+	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
+	struct sas_task *task;
+
+	/* To simplify the implementation we take advantage of the
+	 * silicon's partial acceleration of atapi protocol (dma data
+	 * transfers), so we promote all commands to dma protocol.  This
+	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
+	 */
+	h2d_fis->features |= ATAPI_PKT_DMA;
+
+	scu_stp_raw_request_construct_task_context(ireq);
+
+	task = isci_request_access_task(ireq);
+	if (task->data_dir == DMA_NONE)
+		task->total_xfer_len = 0;
+
+	/* clear the response so we can detect arrival of an
+	 * unsolicited d2h fis
+	 */
+	ireq->stp.rsp.fis_type = 0;
+}
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+			       u32 len,
+			       enum dma_data_direction dir,
+			       bool copy)
+{
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct domain_device *dev = ireq->target_device->domain_dev;
+
+	/* check for management protocols */
+	if (test_bit(IREQ_TMF, &ireq->flags)) {
+		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+		dev_err(&ireq->owning_controller->pdev->dev,
+			"%s: Request 0x%p received un-handled SAT "
+			"management protocol 0x%x.\n",
+			__func__, ireq, tmf->tmf_code);
+
+		return SCI_FAILURE;
+	}
+
+	if (!sas_protocol_ata(task->task_proto)) {
+		dev_err(&ireq->owning_controller->pdev->dev,
+			"%s: Non-ATA protocol in SATA path: 0x%x\n",
+			__func__,
+			task->task_proto);
+		return SCI_FAILURE;
+
+	}
+
+	/* ATAPI */
+	if (dev->sata_dev.class == ATA_DEV_ATAPI &&
+	    task->ata_task.fis.command == ATA_CMD_PACKET) {
+		sci_atapi_construct(ireq);
+		return SCI_SUCCESS;
+	}
+
+	/* non data */
+	if (task->data_dir == DMA_NONE) {
+		scu_stp_raw_request_construct_task_context(ireq);
+		return SCI_SUCCESS;
+	}
+
+	/* NCQ */
+	if (task->ata_task.use_ncq) {
+		sci_stp_optimized_request_construct(ireq,
+							 SCU_TASK_TYPE_FPDMAQ_READ,
+							 len, dir);
+		return SCI_SUCCESS;
+	}
+
+	/* DMA */
+	if (task->ata_task.dma_xfer) {
+		sci_stp_optimized_request_construct(ireq,
+							 SCU_TASK_TYPE_DMA_IN,
+							 len, dir);
+		return SCI_SUCCESS;
+	} else /* PIO */
+		return sci_stp_pio_request_construct(ireq, copy);
+}
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+	struct sas_task *task = isci_request_access_task(ireq);
+
+	ireq->protocol = SAS_PROTOCOL_SSP;
+
+	scu_ssp_io_request_construct_task_context(ireq,
+						  task->data_dir,
+						  task->total_xfer_len);
+
+	sci_io_request_build_ssp_command_iu(ireq);
+
+	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+	struct isci_request *ireq)
+{
+	/* Construct the SSP Task SCU Task Context */
+	scu_ssp_task_request_construct_task_context(ireq);
+
+	/* Fill in the SSP Task IU */
+	sci_task_request_build_ssp_task_iu(ireq);
+
+	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+	enum sci_status status;
+	bool copy = false;
+	struct sas_task *task = isci_request_access_task(ireq);
+
+	ireq->protocol = SAS_PROTOCOL_STP;
+
+	copy = (task->data_dir == DMA_NONE) ? false : true;
+
+	status = sci_io_request_construct_sata(ireq,
+						task->total_xfer_len,
+						task->data_dir,
+						copy);
+
+	if (status == SCI_SUCCESS)
+		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+	return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @ireq: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+	u32 ret_val = 0;
+
+	if (readl(&ihost->smu_registers->address_modifier) == 0) {
+		void __iomem *scu_reg_base = ihost->scu_registers;
+
+		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+		 *   BAR1 is the scu_registers
+		 *   0x20002C = 0x200000 + 0x2c
+		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
+		 *   TCi is the io_tag of struct sci_request
+		 */
+		ret_val = readl(scu_reg_base +
+				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+	}
+
+	return ret_val;
+}
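+
+/*
+ * For instance, with a task context size of 256 bytes (per the 256 * TCi
+ * note above), TCi 5 reads from scu_reg_base + 0x200000 + 0x2c + 5 * 256,
+ * i.e. BAR1 + 0x20052C.
+ */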
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+	enum sci_base_request_states state;
+	struct scu_task_context *tc = ireq->tc;
+	struct isci_host *ihost = ireq->owning_controller;
+
+	state = ireq->sm.current_state_id;
+	if (state != SCI_REQ_CONSTRUCTED) {
+		dev_warn(&ihost->pdev->dev,
+			"%s: SCIC IO Request requested to start while in wrong "
+			 "state %d\n", __func__, state);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+	switch (tc->protocol_type) {
+	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+		/* SSP/SMP Frame */
+		tc->type.ssp.tag = ireq->io_tag;
+		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+		break;
+
+	case SCU_TASK_CONTEXT_PROTOCOL_STP:
+		/* STP/SATA Frame
+		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
+		 */
+		break;
+
+	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+		/* TODO: When do we set no protocol type? */
+		break;
+
+	default:
+		/* This should never happen since we build the IO
+		 * requests */
+		break;
+	}
+
+	/* Add to the post_context the io tag value */
+	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+	/* Everything is good go ahead and change state */
+	sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+	return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+	enum sci_base_request_states state;
+
+	state = ireq->sm.current_state_id;
+
+	switch (state) {
+	case SCI_REQ_CONSTRUCTED:
+		/* Set to make sure no HW terminate posting is done: */
+		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
+		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		return SCI_SUCCESS;
+	case SCI_REQ_STARTED:
+	case SCI_REQ_TASK_WAIT_TC_COMP:
+	case SCI_REQ_SMP_WAIT_RESP:
+	case SCI_REQ_SMP_WAIT_TC_COMP:
+	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+	case SCI_REQ_STP_UDMA_WAIT_D2H:
+	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+	case SCI_REQ_STP_PIO_WAIT_H2D:
+	case SCI_REQ_STP_PIO_WAIT_FRAME:
+	case SCI_REQ_STP_PIO_DATA_IN:
+	case SCI_REQ_STP_PIO_DATA_OUT:
+	case SCI_REQ_ATAPI_WAIT_H2D:
+	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
+	case SCI_REQ_ATAPI_WAIT_D2H:
+	case SCI_REQ_ATAPI_WAIT_TC_COMP:
+		/* Fall through and change state to ABORTING... */
+	case SCI_REQ_TASK_WAIT_TC_RESP:
+		/* The task frame was already confirmed to have been
+		 * sent by the SCU HW.  Since the state machine is
+		 * now only waiting for the task response itself,
+		 * abort the request and complete it immediately
+		 * and don't wait for the task response.
+		 */
+		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+		/* Fall through and handle like ABORTING... */
+	case SCI_REQ_ABORTING:
+		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
+			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
+		else
+			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
+		/* If the request is only waiting on the remote device
+		 * suspension, return SUCCESS so the caller will wait too.
+		 */
+		return SCI_SUCCESS;
+	case SCI_REQ_COMPLETED:
+	default:
+		dev_warn(&ireq->owning_controller->pdev->dev,
+			 "%s: SCIC IO Request requested to abort while in wrong "
+			 "state %d\n", __func__, ireq->sm.current_state_id);
+		break;
+	}
+
+	return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+	enum sci_base_request_states state;
+	struct isci_host *ihost = ireq->owning_controller;
+
+	state = ireq->sm.current_state_id;
+	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+		      "isci: request completion from wrong state (%s)\n",
+		      req_state_name(state)))
+		return SCI_FAILURE_INVALID_STATE;
+
+	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+		sci_controller_release_frame(ihost,
+						  ireq->saved_rx_frame_index);
+
+	/* XXX can we just stop the machine and remove the 'final' state? */
+	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+	return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+						  u32 event_code)
+{
+	enum sci_base_request_states state;
+	struct isci_host *ihost = ireq->owning_controller;
+
+	state = ireq->sm.current_state_id;
+
+	if (state != SCI_REQ_STP_PIO_DATA_IN) {
+		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
+			 __func__, event_code, req_state_name(state));
+
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	switch (scu_get_event_specifier(event_code)) {
+	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+		/* We are waiting for data and the SCU has R_ERR'd the data frame.
+		 * Go back to waiting for the D2H Register FIS
+		 */
+		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+		return SCI_SUCCESS;
+	default:
+		dev_err(&ihost->pdev->dev,
+			"%s: pio request unexpected event %#x\n",
+			__func__, event_code);
+
+		/* TODO Should we fail the PIO request when we get an
+		 * unexpected event?
+		 */
+		return SCI_FAILURE;
+	}
+}
+
+/*
+ * This function copies response data for requests returning response data
+ *    instead of sense data.
+ * @ireq: This parameter specifies the request object for which to copy
+ *    the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+	void *resp_buf;
+	u32 len;
+	struct ssp_response_iu *ssp_response;
+	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+	ssp_response = &ireq->ssp.rsp;
+
+	resp_buf = &isci_tmf->resp.resp_iu;
+
+	len = min_t(u32,
+		    SSP_RESP_IU_MAX_SIZE,
+		    be32_to_cpu(ssp_response->response_data_len));
+
+	memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+			       u32 completion_code)
+{
+	struct ssp_response_iu *resp_iu;
+	u8 datapres;
+
+	/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
+	 * to determine SDMA status
+	 */
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		break;
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+		/* There are times when the SCU hardware will return an early
+		 * response because the io request specified more data than is
+		 * returned by the target device (mode pages, inquiry data,
+		 * etc.).  We must check the response status to see if this is
+		 * truly a failed request or a good request that just got
+		 * completed early.
+		 */
+		struct ssp_response_iu *resp = &ireq->ssp.rsp;
+		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+		sci_swab32_cpy(&ireq->ssp.rsp,
+			       &ireq->ssp.rsp,
+			       word_cnt);
+
+		if (resp->status == 0) {
+			ireq->scu_status = SCU_TASK_DONE_GOOD;
+			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+		} else {
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+		}
+		break;
+	}
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+		sci_swab32_cpy(&ireq->ssp.rsp,
+			       &ireq->ssp.rsp,
+			       word_cnt);
+
+		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+		break;
+	}
+
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+		 * guaranteed to be received before this completion status is
+		 * posted?
+		 */
+		resp_iu = &ireq->ssp.rsp;
+		datapres = resp_iu->datapres;
+
+		if (datapres == 1 || datapres == 2) {
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+		} else {
+			ireq->scu_status = SCU_TASK_DONE_GOOD;
+			ireq->sci_status = SCI_SUCCESS;
+		}
+		break;
+	/* only stp device gets suspended. */
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+		if (ireq->protocol == SAS_PROTOCOL_STP) {
+			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+					   SCU_COMPLETION_TL_STATUS_SHIFT;
+			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+		} else {
+			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+					   SCU_COMPLETION_TL_STATUS_SHIFT;
+			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		}
+		break;
+
+	/* both stp/ssp device gets suspended */
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+				   SCU_COMPLETION_TL_STATUS_SHIFT;
+		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+		break;
+
+	/* neither ssp nor stp gets suspended. */
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+	default:
+		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+				   SCU_COMPLETION_TL_STATUS_SHIFT;
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		break;
+	}
+
+	/*
+	 * TODO: This is probably wrong for ACK/NAK timeout conditions
+	 */
+
+	/* In all cases we will treat this as the completion of the IO req. */
+	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+	return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+				u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+
+	default:
+		/* Unless we get some strange error, wait for the task abort to complete.
+		 * TODO: Should there be a state change for this completion?
+		 */
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+						       u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+		break;
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+		/* Currently, the decision is to simply allow the task request
+		 * to timeout if the task IU wasn't received successfully.
+		 * There is a potential for receiving multiple task responses if
+		 * we decide to send the task IU again.
+		 */
+		dev_warn(&ireq->owning_controller->pdev->dev,
+			 "%s: TaskRequest:0x%p CompletionCode:%x - "
+			 "ACK/NAK timeout\n", __func__, ireq,
+			 completion_code);
+
+		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+		break;
+	default:
+		/*
+		 * All other completion status cause the IO to be complete.
+		 * If a NAK was received, then it is up to the user to retry
+		 * the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+				    u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		/* In the AWAIT RESPONSE state, any TC completion is
+		 * unexpected, but if the TC has success status, we
+		 * complete the IO anyway.
+		 */
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+		/* These statuses have been seen in a specific LSI
+		 * expander, which sometimes is not able to send an smp
+		 * response within 2 ms. This causes our hardware to break
+		 * the connection and set the TC completion with one of
+		 * these SMP_XXX_XX_ERR statuses. For this type of error,
+		 * we ask the ihost user to retry the request.
+		 */
+		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	default:
+		/* All other completion status cause the IO to be complete.  If a NAK
+		 * was received, then it is up to the user to retry the request
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+			   u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	default:
+		/* All other completion status cause the IO to be
+		 * complete.  If a NAK was received, then it is up to
+		 * the user to retry the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+	struct scu_sgl_element *sgl;
+	struct scu_sgl_element_pair *sgl_pair;
+	struct isci_request *ireq = to_ireq(stp_req);
+	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+	if (!sgl_pair)
+		sgl = NULL;
+	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+		if (sgl_pair->B.address_lower == 0 &&
+		    sgl_pair->B.address_upper == 0) {
+			sgl = NULL;
+		} else {
+			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+			sgl = &sgl_pair->B;
+		}
+	} else {
+		if (sgl_pair->next_pair_lower == 0 &&
+		    sgl_pair->next_pair_upper == 0) {
+			sgl = NULL;
+		} else {
+			pio_sgl->index++;
+			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+			sgl = &sgl_pair->A;
+		}
+	}
+
+	return sgl;
+}
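+
+/*
+ * pio_sgl_next() therefore walks the SGL as A, B, next-pair A, next-pair B,
+ * and so on, returning NULL once element B or the pair's next pointer is
+ * zero, matching the terminators written by sci_request_build_sgl().
+ */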
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+					u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+		break;
+
+	default:
+		/* All other completion statuses cause the IO to be
+		 * complete.  If a NAK was received, then it is up to
+		 * the user to retry the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
+
+/* Transmit a DATA FIS from (current sgl + offset) for the input
+ * parameter length. The current sgl and offset are already stored in the IO
+ * request.
+ */
+static enum sci_status sci_stp_request_pio_data_out_transmit_data_frame(
+	struct isci_request *ireq,
+	u32 length)
+{
+	struct isci_stp_request *stp_req = &ireq->stp.req;
+	struct scu_task_context *task_context = ireq->tc;
+	struct scu_sgl_element_pair *sgl_pair;
+	struct scu_sgl_element *current_sgl;
+
+	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
+	 * the data from current_sgl+offset for the input length.
+	 */
+	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+		current_sgl = &sgl_pair->A;
+	else
+		current_sgl = &sgl_pair->B;
+
+	/* update the TC */
+	task_context->command_iu_upper = current_sgl->address_upper;
+	task_context->command_iu_lower = current_sgl->address_lower;
+	task_context->transfer_length_bytes = length;
+	task_context->type.stp.fis_type = FIS_DATA;
+
+	/* send the new TC out. */
+	return sci_controller_continue_io(ireq);
+}
+
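+/* Send the next chunk of a PIO data-out transfer: if the remaining
+ * request length (pio_len) covers the rest of the current SGL element,
+ * transmit the whole element and advance to the next one; otherwise
+ * transmit pio_len bytes and adjust the offset within the element.
+ */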
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+	struct isci_stp_request *stp_req = &ireq->stp.req;
+	struct scu_sgl_element_pair *sgl_pair;
+	enum sci_status status = SCI_SUCCESS;
+	struct scu_sgl_element *sgl;
+	u32 offset;
+	u32 len = 0;
+
+	offset = stp_req->sgl.offset;
+	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+		return SCI_FAILURE;
+
+	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+		sgl = &sgl_pair->A;
+		len = sgl_pair->A.length - offset;
+	} else {
+		sgl = &sgl_pair->B;
+		len = sgl_pair->B.length - offset;
+	}
+
+	if (stp_req->pio_len == 0)
+		return SCI_SUCCESS;
+
+	if (stp_req->pio_len >= len) {
+		status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, len);
+		if (status != SCI_SUCCESS)
+			return status;
+		stp_req->pio_len -= len;
+
+		/* update the current sgl, offset and save for future */
+		sgl = pio_sgl_next(stp_req);
+		offset = 0;
+	} else if (stp_req->pio_len < len) {
+		sci_stp_request_pio_data_out_transmit_data_frame(ireq, stp_req->pio_len);
+
+		/* Sgl offset will be adjusted and saved for future */
+		offset += stp_req->pio_len;
+		sgl->address_lower += stp_req->pio_len;
+		stp_req->pio_len = 0;
+	}
+
+	stp_req->sgl.offset = offset;
+
+	return status;
+}
+
+/**
+ * sci_stp_request_pio_data_in_copy_data_buffer()
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO request
+ * SGL specified data region.
+ *
+ * Return: enum sci_status
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+					     u8 *data_buf, u32 len)
+{
+	struct isci_request *ireq;
+	u8 *src_addr;
+	int copy_len;
+	struct sas_task *task;
+	struct scatterlist *sg;
+	void *kaddr;
+	int total_len = len;
+
+	ireq = to_ireq(stp_req);
+	task = isci_request_access_task(ireq);
+	src_addr = data_buf;
+
+	if (task->num_scatter > 0) {
+		sg = task->scatter;
+
+		while (total_len > 0) {
+			struct page *page = sg_page(sg);
+
+			copy_len = min_t(int, total_len, sg_dma_len(sg));
+			kaddr = kmap_atomic(page);
+			memcpy(kaddr + sg->offset, src_addr, copy_len);
+			kunmap_atomic(kaddr);
+			total_len -= copy_len;
+			src_addr += copy_len;
+			sg = sg_next(sg);
+		}
+	} else {
+		BUG_ON(task->total_xfer_len < total_len);
+		memcpy(task->scatter, src_addr, total_len);
+	}
+
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_stp_request_pio_data_in_copy_data()
+ * @stp_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region.
+ *
+ * Return: enum sci_status
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+	struct isci_stp_request *stp_req,
+	u8 *data_buffer)
+{
+	enum sci_status status;
+
+	/*
+	 * If there is less than 1K remaining in the transfer request,
+	 * copy just the data for the transfer.
+	 */
+	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+		status = sci_stp_request_pio_data_in_copy_data_buffer(
+			stp_req, data_buffer, stp_req->pio_len);
+
+		if (status == SCI_SUCCESS)
+			stp_req->pio_len = 0;
+	} else {
+		/* We are transferring the whole frame, so copy */
+		status = sci_stp_request_pio_data_in_copy_data_buffer(
+			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+		if (status == SCI_SUCCESS)
+			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+	}
+
+	return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+					      u32 completion_code)
+{
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+		break;
+
+	default:
+		/* All other completion statuses cause the IO to be
+		 * complete.  If a NAK was received, then it is up to
+		 * the user to retry the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+			      u32 completion_code)
+{
+	enum sci_status status = SCI_SUCCESS;
+	bool all_frames_transferred = false;
+	struct isci_stp_request *stp_req = &ireq->stp.req;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		/* Transmit data */
+		if (stp_req->pio_len != 0) {
+			status = sci_stp_request_pio_data_out_transmit_data(ireq);
+			if (status == SCI_SUCCESS) {
+				if (stp_req->pio_len == 0)
+					all_frames_transferred = true;
+			}
+		} else if (stp_req->pio_len == 0) {
+			/*
+			 * This will happen if all the data is written the
+			 * first time after the PIO Setup FIS is received.
+			 */
+			all_frames_transferred = true;
+		}
+
+		/* all data transferred. */
+		if (all_frames_transferred) {
+			/*
+			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+			 * and wait for a PIO Setup FIS or a D2H Register FIS.
+			 */
+			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+		}
+		break;
+
+	default:
+		/*
+		 * All other completion statuses cause the IO to be complete.
+		 * If a NAK was received, then it is up to the user to retry
+		 * the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return status;
+}
+
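+/* Generic UDMA frame handler: if the unsolicited frame contains a D2H
+ * Register FIS, copy it into the request's response buffer; in either
+ * case release the frame back to the controller.
+ */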
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+								       u32 frame_index)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+	struct dev_to_host_fis *frame_header;
+	enum sci_status status;
+	u32 *frame_buffer;
+
+	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+							       frame_index,
+							       (void **)&frame_header);
+
+	if ((status == SCI_SUCCESS) &&
+	    (frame_header->fis_type == FIS_REGD2H)) {
+		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+							      frame_index,
+							      (void **)&frame_buffer);
+
+		sci_controller_copy_sata_response(&ireq->stp.rsp,
+						       frame_header,
+						       frame_buffer);
+	}
+
+	sci_controller_release_frame(ihost, frame_index);
+
+	return status;
+}
+
+static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
+					       u32 frame_index)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+	enum sci_status status;
+	struct dev_to_host_fis *frame_header;
+	u32 *frame_buffer;
+
+	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+							  frame_index,
+							  (void **)&frame_header);
+
+	if (status != SCI_SUCCESS)
+		return status;
+
+	if (frame_header->fis_type != FIS_REGD2H) {
+		dev_err(&ireq->isci_host->pdev->dev,
+			"%s ERROR: invalid fis type 0x%X\n",
+			__func__, frame_header->fis_type);
+		return SCI_FAILURE;
+	}
+
+	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+						 frame_index,
+						 (void **)&frame_buffer);
+
+	sci_controller_copy_sata_response(&ireq->stp.rsp,
+					  (u32 *)frame_header,
+					  frame_buffer);
+
+	/* Frame has been decoded; return it to the controller */
+	sci_controller_release_frame(ihost, frame_index);
+
+	return status;
+}
+
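+/* Handle the D2H Register FIS that ends an ATAPI sequence: decode the
+ * unsolicited frame, map the ATA error bit to an SCI status, and for
+ * non-data commands complete the request.
+ */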
+static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
+						   u32 frame_index)
+{
+	struct sas_task *task = isci_request_access_task(ireq);
+	enum sci_status status;
+
+	status = process_unsolicited_fis(ireq, frame_index);
+
+	if (status == SCI_SUCCESS) {
+		if (ireq->stp.rsp.status & ATA_ERR)
+			status = SCI_IO_FAILURE_RESPONSE_VALID;
+	} else {
+		status = SCI_IO_FAILURE_RESPONSE_VALID;
+	}
+
+	if (status != SCI_SUCCESS) {
+		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+		ireq->sci_status = status;
+	} else {
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+	}
+
+	/* the D2H unsolicited frame marks the end of non-data commands */
+	if (task->data_dir == DMA_NONE)
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+	return status;
+}
+
+static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
+{
+	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
+	struct scu_task_context *task_context = ireq->tc;
+
+	/* Fill in the SCU Task Context for a DATA FIS containing the CDB in
+	 * Raw Frame type. The TC for the previous Packet FIS was already
+	 * there; we only need to change the H2D FIS content.
+	 */
+	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
+	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
+	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
+	task_context->type.stp.fis_type = FIS_DATA;
+	task_context->transfer_length_bytes = dev->cdb_len;
+}
+
+static void scu_atapi_construct_task_context(struct isci_request *ireq)
+{
+	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct scu_task_context *task_context = ireq->tc;
+	int cdb_len = dev->cdb_len;
+
+	/* reference: SSTL 1.13.4.2
+	 * task_type, sata_direction
+	 */
+	if (task->data_dir == DMA_TO_DEVICE) {
+		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
+		task_context->sata_direction = 0;
+	} else {
+		/* todo: for a NO_DATA command, we need to send out a raw frame. */
+		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
+		task_context->sata_direction = 1;
+	}
+
+	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
+	task_context->type.stp.fis_type = FIS_DATA;
+
+	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
+	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
+
+	/* task phase is set to TX_CMD */
+	task_context->task_phase = 0x1;
+
+	/* retry counter */
+	task_context->stp_retry_count = 0;
+
+	/* data transfer size. */
+	task_context->transfer_length_bytes = task->total_xfer_len;
+
+	/* setup sgl */
+	sci_request_build_sgl(ireq);
+}
+
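+/* Dispatch an unsolicited frame to the handler for the request's current
+ * state. Frames are released back to the controller once decoded, except
+ * in the PIO data-in path below, where the frame index may be saved for
+ * later processing.
+ */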
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+				  u32 frame_index)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+	struct isci_stp_request *stp_req = &ireq->stp.req;
+	enum sci_base_request_states state;
+	enum sci_status status;
+	ssize_t word_cnt;
+
+	state = ireq->sm.current_state_id;
+	switch (state)  {
+	case SCI_REQ_STARTED: {
+		struct ssp_frame_hdr ssp_hdr;
+		void *frame_header;
+
+		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+							      frame_index,
+							      &frame_header);
+
+		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+		if (ssp_hdr.frame_type == SSP_RESPONSE) {
+			struct ssp_response_iu *resp_iu;
+			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								      frame_index,
+								      (void **)&resp_iu);
+
+			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+			resp_iu = &ireq->ssp.rsp;
+
+			if (resp_iu->datapres == 0x01 ||
+			    resp_iu->datapres == 0x02) {
+				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+			} else {
+				ireq->scu_status = SCU_TASK_DONE_GOOD;
+				ireq->sci_status = SCI_SUCCESS;
+			}
+		} else {
+			/* not a response frame, why did it get forwarded? */
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC IO Request 0x%p received unexpected "
+				"frame %d type 0x%02x\n", __func__, ireq,
+				frame_index, ssp_hdr.frame_type);
+		}
+
+		/*
+		 * In any case we are done with this frame buffer; return it
+		 * to the controller.
+		 */
+		sci_controller_release_frame(ihost, frame_index);
+
+		return SCI_SUCCESS;
+	}
+
+	case SCI_REQ_TASK_WAIT_TC_RESP:
+		sci_io_request_copy_response(ireq);
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		sci_controller_release_frame(ihost, frame_index);
+		return SCI_SUCCESS;
+
+	case SCI_REQ_SMP_WAIT_RESP: {
+		struct sas_task *task = isci_request_access_task(ireq);
+		struct scatterlist *sg = &task->smp_task.smp_resp;
+		void *frame_header, *kaddr;
+		u8 *rsp;
+
+		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+							 frame_index,
+							 &frame_header);
+		kaddr = kmap_atomic(sg_page(sg));
+		rsp = kaddr + sg->offset;
+		sci_swab32_cpy(rsp, frame_header, 1);
+
+		if (rsp[0] == SMP_RESPONSE) {
+			void *smp_resp;
+
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								 frame_index,
+								 &smp_resp);
+
+			word_cnt = (sg->length/4)-1;
+			if (word_cnt > 0)
+				word_cnt = min_t(unsigned int, word_cnt,
+						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
+			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
+
+			ireq->scu_status = SCU_TASK_DONE_GOOD;
+			ireq->sci_status = SCI_SUCCESS;
+			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+		} else {
+			/*
+			 * This was not a response frame; why did it get
+			 * forwarded?
+			 */
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC SMP Request 0x%p received unexpected "
+				"frame %d type 0x%02x\n",
+				__func__,
+				ireq,
+				frame_index,
+				rsp[0]);
+
+			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		}
+		kunmap_atomic(kaddr);
+
+		sci_controller_release_frame(ihost, frame_index);
+
+		return SCI_SUCCESS;
+	}
+
+	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+		return sci_stp_request_udma_general_frame_handler(ireq,
+								       frame_index);
+
+	case SCI_REQ_STP_UDMA_WAIT_D2H:
+		/* Use the general frame handler to copy the response data */
+		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+		if (status != SCI_SUCCESS)
+			return status;
+
+		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		return SCI_SUCCESS;
+
+	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+		struct dev_to_host_fis *frame_header;
+		u32 *frame_buffer;
+
+		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								       frame_index,
+								       (void **)&frame_header);
+
+		if (status != SCI_SUCCESS) {
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC IO Request 0x%p could not get frame "
+				"header for frame index %d, status %x\n",
+				__func__,
+				stp_req,
+				frame_index,
+				status);
+
+			return status;
+		}
+
+		switch (frame_header->fis_type) {
+		case FIS_REGD2H:
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								      frame_index,
+								      (void **)&frame_buffer);
+
+			sci_controller_copy_sata_response(&ireq->stp.rsp,
+							       frame_header,
+							       frame_buffer);
+
+			/* The command has completed with error */
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+			break;
+
+		default:
+			dev_warn(&ihost->pdev->dev,
+				 "%s: IO Request:0x%p Frame Id:%d protocol "
+				  "violation occurred\n", __func__, stp_req,
+				  frame_index);
+
+			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+			break;
+		}
+
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+		/* Frame has been decoded; return it to the controller */
+		sci_controller_release_frame(ihost, frame_index);
+
+		return status;
+	}
+
+	case SCI_REQ_STP_PIO_WAIT_FRAME: {
+		struct sas_task *task = isci_request_access_task(ireq);
+		struct dev_to_host_fis *frame_header;
+		u32 *frame_buffer;
+
+		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								       frame_index,
+								       (void **)&frame_header);
+
+		if (status != SCI_SUCCESS) {
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC IO Request 0x%p could not get frame "
+				"header for frame index %d, status %x\n",
+				__func__, stp_req, frame_index, status);
+			return status;
+		}
+
+		switch (frame_header->fis_type) {
+		case FIS_PIO_SETUP:
+			/* Get from the frame buffer the PIO Setup Data */
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								      frame_index,
+								      (void **)&frame_buffer);
+
+			/* Get the data from the PIO Setup. The SCU hardware
+			 * returns the first word in the frame_header, and the
+			 * rest of the data is in the frame buffer, so we need
+			 * to back up one dword.
+			 */
+
+			/* transfer_count: first 16 bits in the 4th dword */
+			stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+			/* status: 4th byte in the 3rd dword */
+			stp_req->status = (frame_buffer[2] >> 24) & 0xff;
+
+			sci_controller_copy_sata_response(&ireq->stp.rsp,
+							       frame_header,
+							       frame_buffer);
+
+			ireq->stp.rsp.status = stp_req->status;
+
+			/* The next state depends on whether the request was
+			 * PIO Data-in or Data-out.
+			 */
+			if (task->data_dir == DMA_FROM_DEVICE) {
+				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+			} else if (task->data_dir == DMA_TO_DEVICE) {
+				/* Transmit data */
+				status = sci_stp_request_pio_data_out_transmit_data(ireq);
+				if (status != SCI_SUCCESS)
+					break;
+				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+			}
+			break;
+
+		case FIS_SETDEVBITS:
+			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+			break;
+
+		case FIS_REGD2H:
+			if (frame_header->status & ATA_BUSY) {
+				/*
+				 * Now why is the drive sending a D2H Register
+				 * FIS when it is still busy?  Do nothing since
+				 * we are still in the right state.
+				 */
+				dev_dbg(&ihost->pdev->dev,
+					"%s: SCIC PIO Request 0x%p received "
+					"D2H Register FIS with BSY status "
+					"0x%x\n",
+					__func__,
+					stp_req,
+					frame_header->status);
+				break;
+			}
+
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								      frame_index,
+								      (void **)&frame_buffer);
+
+			sci_controller_copy_sata_response(&ireq->stp.rsp,
+							       frame_header,
+							       frame_buffer);
+
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+			break;
+
+		default:
+			/* FIXME: what do we do here? */
+			break;
+		}
+
+		/* Frame is decoded; return it to the controller */
+		sci_controller_release_frame(ihost, frame_index);
+
+		return status;
+	}
+
+	case SCI_REQ_STP_PIO_DATA_IN: {
+		struct dev_to_host_fis *frame_header;
+		struct sata_fis_data *frame_buffer;
+
+		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+								       frame_index,
+								       (void **)&frame_header);
+
+		if (status != SCI_SUCCESS) {
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC IO Request 0x%p could not get frame "
+				"header for frame index %d, status %x\n",
+				__func__,
+				stp_req,
+				frame_index,
+				status);
+			return status;
+		}
+
+		if (frame_header->fis_type != FIS_DATA) {
+			dev_err(&ihost->pdev->dev,
+				"%s: SCIC PIO Request 0x%p received frame %d "
+				"with fis type 0x%02x when expecting a data "
+				"fis.\n",
+				__func__,
+				stp_req,
+				frame_index,
+				frame_header->fis_type);
+
+			ireq->scu_status = SCU_TASK_DONE_GOOD;
+			ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+			/* Frame is decoded; return it to the controller */
+			sci_controller_release_frame(ihost, frame_index);
+			return status;
+		}
+
+		if (stp_req->sgl.index < 0) {
+			ireq->saved_rx_frame_index = frame_index;
+			stp_req->pio_len = 0;
+		} else {
+			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+								      frame_index,
+								      (void **)&frame_buffer);
+
+			status = sci_stp_request_pio_data_in_copy_data(stp_req,
+									    (u8 *)frame_buffer);
+
+			/* Frame is decoded; return it to the controller */
+			sci_controller_release_frame(ihost, frame_index);
+		}
+
+		/* Check for the end of the transfer: are there more
+		 * bytes remaining for this data transfer?
+		 */
+		if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+			return status;
+
+		if ((stp_req->status & ATA_BUSY) == 0) {
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		} else {
+			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+		}
+		return status;
+	}
+
+	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		sci_controller_release_frame(ihost, frame_index);
+		ireq->target_device->working_request = ireq;
+		if (task->data_dir == DMA_NONE) {
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
+			scu_atapi_reconstruct_raw_frame_task_context(ireq);
+		} else {
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+			scu_atapi_construct_task_context(ireq);
+		}
+
+		sci_controller_continue_io(ireq);
+		return SCI_SUCCESS;
+	}
+	case SCI_REQ_ATAPI_WAIT_D2H:
+		return atapi_d2h_reg_frame_handler(ireq, frame_index);
+	case SCI_REQ_ABORTING:
+		/*
+		 * TODO: Is it even possible to get an unsolicited frame in the
+		 * aborting state?
+		 */
+		sci_controller_release_frame(ihost, frame_index);
+		return SCI_SUCCESS;
+
+	default:
+		dev_warn(&ihost->pdev->dev,
+			 "%s: SCIC IO Request given unexpected frame %x while "
+			 "in state %d\n",
+			 __func__,
+			 frame_index,
+			 state);
+
+		sci_controller_release_frame(ihost, frame_index);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+						       u32 completion_code)
+{
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+		/* We must check the response buffer to see if the D2H
+		 * Register FIS was received before we got the TC
+		 * completion.
+		 */
+		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+			sci_remote_device_suspend(ireq->target_device,
+						  SCI_SW_SUSPEND_NORMAL);
+
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		} else {
+			/* If we have an error completion status for the
+			 * TC, then we can expect a D2H Register FIS from
+			 * the device, so we must change state to wait
+			 * for it.
+			 */
+			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+		}
+		break;
+
+	/* TODO: Check to see if any of these completion statuses need to
+	 * wait for the device-to-host register FIS.
+	 */
+	/* TODO: We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+	 * - this comes only for B0.
+	 */
+	default:
+		/* All other completion statuses cause the IO to be complete. */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return status;
+}
+
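+/* Common TC completion handler for the raw-frame ATAPI phases: on
+ * SCU_TASK_DONE_GOOD advance the state machine to @next, otherwise fail
+ * the request with a controller-specific IO error.
+ */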
+static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
+						  enum sci_base_request_states next)
+{
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, next);
+		break;
+	default:
+		/* All other completion statuses cause the IO to be complete.
+		 * If a NAK was received, then it is up to the user to retry
+		 * the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return status;
+}
+
+static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
+							u32 completion_code)
+{
+	struct isci_remote_device *idev = ireq->target_device;
+	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+
+	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
+		u16 len = sci_req_tx_bytes(ireq);
+
+		/* likely a non-error data underrun; work around the missing
+		 * d2h frame from the controller
+		 */
+		if (d2h->fis_type != FIS_REGD2H) {
+			d2h->fis_type = FIS_REGD2H;
+			d2h->flags = (1 << 6);
+			d2h->status = 0x50;
+			d2h->error = 0;
+			d2h->lbal = 0;
+			d2h->byte_count_low = len & 0xff;
+			d2h->byte_count_high = len >> 8;
+			d2h->device = 0xa0;
+			d2h->lbal_exp = 0;
+			d2h->lbam_exp = 0;
+			d2h->lbah_exp = 0;
+			d2h->_r_a = 0;
+			d2h->sector_count = 0x3;
+			d2h->sector_count_exp = 0;
+			d2h->_r_b = 0;
+			d2h->_r_c = 0;
+			d2h->_r_d = 0;
+		}
+
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+		status = ireq->sci_status;
+
+		/* the hw will have suspended the rnc, so complete the
+		 * request upon pending resume
+		 */
+		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+		break;
+	}
+	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
+		/* In this case, there is no UF coming after;
+		 * complete the IO now.
+		 */
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+
+	default:
+		if (d2h->fis_type == FIS_REGD2H) {
+			/* UF received change the device state to ATAPI_ERROR */
+			status = ireq->sci_status;
+			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+		} else {
+			/* If we receive any non-success TC status and no UF
+			 * has been received yet, then a UF for the status
+			 * FIS is coming after (XXX: suspect this is
+			 * actually a protocol error or a bug like the
+			 * DONE_UNEXP_FIS case)
+			 */
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+		}
+		break;
+	}
+
+	return status;
+}
+
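+/* The predicates below classify, per protocol, the TC completion
+ * statuses after which the hardware suspends the remote node context,
+ * either for transmit only or for both transmit and receive.
+ */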
+static int sci_request_smp_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+		return 1;
+	}
+	return 0;
+}
+
+static int sci_request_smp_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	return 0; /* There are no Tx/Rx SMP suspend conditions. */
+}
+
+static int sci_request_ssp_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+	case SCU_TASK_DONE_LF_ERR:
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+		return 1;
+	}
+	return 0;
+}
+
+static int sci_request_ssp_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	return 0; /* There are no Tx/Rx SSP suspend conditions. */
+}
+
+static int sci_request_stpsata_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+	case SCU_TASK_DONE_LL_R_ERR:
+	case SCU_TASK_DONE_LL_PERR:
+	case SCU_TASK_DONE_REG_ERR:
+	case SCU_TASK_DONE_SDB_ERR:
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+		return 1;
+	}
+	return 0;
+}
+
+
+static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_LF_ERR:
+	case SCU_TASK_DONE_LL_SY_TERM:
+	case SCU_TASK_DONE_LL_LF_TERM:
+	case SCU_TASK_DONE_BREAK_RCVD:
+	case SCU_TASK_DONE_INV_FIS_LEN:
+	case SCU_TASK_DONE_UNEXP_FIS:
+	case SCU_TASK_DONE_UNEXP_SDBFIS:
+	case SCU_TASK_DONE_MAX_PLD_ERR:
+		return 1;
+	}
+	return 0;
+}
+
+static void sci_request_handle_suspending_completions(
+	struct isci_request *ireq,
+	u32 completion_code)
+{
+	int is_tx = 0;
+	int is_tx_rx = 0;
+
+	switch (ireq->protocol) {
+	case SAS_PROTOCOL_SMP:
+		is_tx = sci_request_smp_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
+			completion_code);
+		break;
+	case SAS_PROTOCOL_SSP:
+		is_tx = sci_request_ssp_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
+			completion_code);
+		break;
+	case SAS_PROTOCOL_STP:
+		is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx =
+			sci_request_stpsata_completion_status_is_tx_rx_suspend(
+				completion_code);
+		break;
+	default:
+		dev_warn(&ireq->isci_host->pdev->dev,
+			 "%s: request %p has no valid protocol\n",
+			 __func__, ireq);
+		break;
+	}
+	if (is_tx || is_tx_rx) {
+		BUG_ON(is_tx && is_tx_rx);
+
+		sci_remote_node_context_suspend(
+			&ireq->target_device->rnc,
+			SCI_HW_SUSPEND,
+			(is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+				   : SCU_EVENT_TL_RNC_SUSPEND_TX);
+	}
+}
+
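+/* Central TC completion dispatch: first note any completion status that
+ * implies an upcoming remote node context suspension, then route the
+ * completion to the handler for the request's current state.
+ */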
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+			     u32 completion_code)
+{
+	enum sci_base_request_states state;
+	struct isci_host *ihost = ireq->owning_controller;
+
+	state = ireq->sm.current_state_id;
+
+	/* Decode those completions that signal upcoming suspension events. */
+	sci_request_handle_suspending_completions(
+		ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
+
+	switch (state) {
+	case SCI_REQ_STARTED:
+		return request_started_state_tc_event(ireq, completion_code);
+
+	case SCI_REQ_TASK_WAIT_TC_COMP:
+		return ssp_task_request_await_tc_event(ireq,
+						       completion_code);
+
+	case SCI_REQ_SMP_WAIT_RESP:
+		return smp_request_await_response_tc_event(ireq,
+							   completion_code);
+
+	case SCI_REQ_SMP_WAIT_TC_COMP:
+		return smp_request_await_tc_event(ireq, completion_code);
+
+	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+		return stp_request_udma_await_tc_event(ireq,
+						       completion_code);
+
+	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+		return stp_request_non_data_await_h2d_tc_event(ireq,
+							       completion_code);
+
+	case SCI_REQ_STP_PIO_WAIT_H2D:
+		return stp_request_pio_await_h2d_completion_tc_event(ireq,
+								     completion_code);
+
+	case SCI_REQ_STP_PIO_DATA_OUT:
+		return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+	case SCI_REQ_ABORTING:
+		return request_aborting_state_tc_event(ireq,
+						       completion_code);
+
+	case SCI_REQ_ATAPI_WAIT_H2D:
+		return atapi_raw_completion(ireq, completion_code,
+					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);
+
+	case SCI_REQ_ATAPI_WAIT_TC_COMP:
+		return atapi_raw_completion(ireq, completion_code,
+					    SCI_REQ_ATAPI_WAIT_D2H);
+
+	case SCI_REQ_ATAPI_WAIT_D2H:
+		return atapi_data_tc_completion_handler(ireq, completion_code);
+
+	default:
+		dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
+			 __func__, completion_code, req_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
+	}
+}
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ *    response iu, in the task struct, from the request object for the upper
+ *    layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+	struct sas_task *task,
+	struct ssp_response_iu *resp_iu,
+	struct device *dev)
+{
+	dev_dbg(dev,
+		"%s: resp_iu = %p "
+		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+		"resp_iu->response_data_len = %x, "
+		"resp_iu->sense_data_len = %x\nrepsonse data: ",
+		__func__,
+		resp_iu,
+		resp_iu->status,
+		resp_iu->datapres,
+		resp_iu->response_data_len,
+		resp_iu->sense_data_len);
+
+	task->task_status.stat = resp_iu->status;
+
+	/* libsas updates the task status fields based on the response iu. */
+	sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ *    completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ *    abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+	struct isci_request *request,
+	struct sas_task *task,
+	enum service_response *response_ptr,
+	enum exec_status *status_ptr,
+	enum sas_open_rej_reason open_rej_reason)
+{
+	/* Task in the target is done. */
+	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+	*response_ptr                     = SAS_TASK_UNDELIVERED;
+	*status_ptr                       = SAS_OPEN_REJECT;
+	task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ *    controller-specific I/O completion error conditions.
+ * @idev: This parameter is the remote device on which the I/O completed.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+	struct isci_remote_device *idev,
+	struct isci_request *request,
+	struct sas_task *task,
+	enum service_response *response_ptr,
+	enum exec_status *status_ptr)
+{
+	unsigned int cstatus;
+
+	cstatus = request->scu_status;
+
+	dev_dbg(&request->isci_host->pdev->dev,
+		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+		"- controller status = 0x%x\n",
+		__func__, request, cstatus);
+
+	/* Decode the controller-specific errors; most
+	 * important is to recognize those conditions in which
+	 * the target may still have a task outstanding that
+	 * must be aborted.
+	 *
+	 * Note that there are SCU completion codes being
+	 * named in the decode below for which SCIC has already
+	 * done work to handle them in a way other than as
+	 * a controller-specific completion code; these are left
+	 * in the decode below for completeness' sake.
+	 */
+	switch (cstatus) {
+	case SCU_TASK_DONE_DMASETUP_DIRERR:
+	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+	case SCU_TASK_DONE_XFERCNT_ERR:
+		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+		if (task->task_proto == SAS_PROTOCOL_SMP) {
+			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+			*response_ptr = SAS_TASK_COMPLETE;
+
+			/* See if the device has been/is being stopped. Note
+			 * that we ignore the quiesce state, since we are
+			 * concerned about the actual device state.
+			 */
+			if (!idev)
+				*status_ptr = SAS_DEVICE_UNKNOWN;
+			else
+				*status_ptr = SAS_ABORTED_TASK;
+
+			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		} else {
+			/* Task in the target is not done. */
+			*response_ptr = SAS_TASK_UNDELIVERED;
+
+			if (!idev)
+				*status_ptr = SAS_DEVICE_UNKNOWN;
+			else
+				*status_ptr = SAM_STAT_TASK_ABORTED;
+
+			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		}
+
+		break;
+
+	case SCU_TASK_DONE_CRC_ERR:
+	case SCU_TASK_DONE_NAK_CMD_ERR:
+	case SCU_TASK_DONE_EXCESS_DATA:
+	case SCU_TASK_DONE_UNEXP_FIS:
+	/* Also SCU_TASK_DONE_UNEXP_RESP: */
+	case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
+	case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
+	case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
+		/* These are conditions in which the target
+		 * has completed the task, so that no cleanup
+		 * is necessary.
+		 */
+		*response_ptr = SAS_TASK_COMPLETE;
+
+		/* See if the device has been/is being stopped. Note
+		 * that we ignore the quiesce state, since we are
+		 * concerned about the actual device state.
+		 */
+		if (!idev)
+			*status_ptr = SAS_DEVICE_UNKNOWN;
+		else
+			*status_ptr = SAS_ABORTED_TASK;
+
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+
+
+	/* Note that the only open reject completion codes seen here will be
+	 * abandon-class codes; all others are automatically retried in the SCU.
+	 */
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_WRONG_DEST);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+		/* Note - the return of AB0 will change when
+		 * libsas implements detection of zone violations.
+		 */
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_RESV_AB0);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_RESV_AB1);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_RESV_AB2);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_RESV_AB3);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_BAD_DEST);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_STP_NORES);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_EPROTO);
+		break;
+
+	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+		isci_request_set_open_reject_status(
+			request, task, response_ptr, status_ptr,
+			SAS_OREJ_CONN_RATE);
+		break;
+
+	case SCU_TASK_DONE_LL_R_ERR:
+	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
+	case SCU_TASK_DONE_LL_PERR:
+	case SCU_TASK_DONE_LL_SY_TERM:
+	/* Also SCU_TASK_DONE_NAK_ERR:*/
+	case SCU_TASK_DONE_LL_LF_TERM:
+	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+	case SCU_TASK_DONE_LL_ABORT_ERR:
+	case SCU_TASK_DONE_SEQ_INV_TYPE:
+	/* Also SCU_TASK_DONE_UNEXP_XR: */
+	case SCU_TASK_DONE_XR_IU_LEN_ERR:
+	case SCU_TASK_DONE_INV_FIS_LEN:
+	/* Also SCU_TASK_DONE_XR_WD_LEN: */
+	case SCU_TASK_DONE_SDMA_ERR:
+	case SCU_TASK_DONE_OFFSET_ERR:
+	case SCU_TASK_DONE_MAX_PLD_ERR:
+	case SCU_TASK_DONE_LF_ERR:
+	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
+	case SCU_TASK_DONE_SMP_LL_RX_ERR:
+	case SCU_TASK_DONE_UNEXP_DATA:
+	case SCU_TASK_DONE_UNEXP_SDBFIS:
+	case SCU_TASK_DONE_REG_ERR:
+	case SCU_TASK_DONE_SDB_ERR:
+	case SCU_TASK_DONE_TASK_ABORT:
+	default:
+		/* Task in the target is not done. */
+		*response_ptr = SAS_TASK_UNDELIVERED;
+		*status_ptr = SAM_STAT_TASK_ABORTED;
+
+		if (task->task_proto == SAS_PROTOCOL_SMP)
+			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		else
+			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+	}
+}
+
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
+{
+	struct task_status_struct *ts = &task->task_status;
+	struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+	resp->frame_len = sizeof(*fis);
+	memcpy(resp->ending_fis, fis, sizeof(*fis));
+	ts->buf_valid_size = sizeof(*resp);
+
+	/* If an error is flagged let libata decode the fis */
+	if (ac_err_mask(fis->status))
+		ts->stat = SAS_PROTO_RESPONSE;
+	else
+		ts->stat = SAM_STAT_GOOD;
+
+	ts->resp = SAS_TASK_COMPLETE;
+}
+
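+/* Translate the core's IO completion status into the libsas response
+ * and exec status, unmap any DMA mappings held for the task, update the
+ * task state flags, and complete the request back to the core.
+ */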
+static void isci_request_io_request_complete(struct isci_host *ihost,
+					     struct isci_request *request,
+					     enum sci_io_status completion_status)
+{
+	struct sas_task *task = isci_request_access_task(request);
+	struct ssp_response_iu *resp_iu;
+	unsigned long task_flags;
+	struct isci_remote_device *idev = request->target_device;
+	enum service_response response = SAS_TASK_UNDELIVERED;
+	enum exec_status status = SAS_ABORTED_TASK;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: request = %p, task = %p, "
+		"task->data_dir = %d completion_status = 0x%x\n",
+		__func__, request, task, task->data_dir, completion_status);
+
+	/* The request is done from an SCU HW perspective. */
+
+	/* This is an active request being completed from the core. */
+	switch (completion_status) {
+
+	case SCI_IO_FAILURE_RESPONSE_VALID:
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+			__func__, request, task);
+
+		if (sas_protocol_ata(task->task_proto)) {
+			isci_process_stp_response(task, &request->stp.rsp);
+		} else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+			/* crack the iu response buffer. */
+			resp_iu = &request->ssp.rsp;
+			isci_request_process_response_iu(task, resp_iu,
+							 &ihost->pdev->dev);
+
+		} else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+			dev_err(&ihost->pdev->dev,
+				"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+					"SAS_PROTOCOL_SMP protocol\n",
+				__func__);
+
+		} else
+			dev_err(&ihost->pdev->dev,
+				"%s: unknown protocol\n", __func__);
+
+		/* use the task status set in the task struct by the
+		 * isci_request_process_response_iu call.
+		 */
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		response = task->task_status.resp;
+		status = task->task_status.stat;
+		break;
+
+	case SCI_IO_SUCCESS:
+	case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+		response = SAS_TASK_COMPLETE;
+		status   = SAM_STAT_GOOD;
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+		if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+			/* This was an SSP / STP / SATA transfer.
+			 * There is a possibility that less data than
+			 * the maximum was transferred.
+			 */
+			u32 transferred_length = sci_req_tx_bytes(request);
+
+			task->task_status.residual
+				= task->total_xfer_len - transferred_length;
+
+			/* If there were residual bytes, call this an
+			 * underrun.
+			 */
+			if (task->task_status.residual != 0)
+				status = SAS_DATA_UNDERRUN;
+
+			dev_dbg(&ihost->pdev->dev,
+				"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+				__func__, status);
+
+		} else
+			dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
+				__func__);
+		break;
+
+	case SCI_IO_FAILURE_TERMINATED:
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+			__func__, request, task);
+
+		/* The request was terminated explicitly. */
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		response = SAS_TASK_UNDELIVERED;
+
+		/* See if the device has been/is being stopped. Note
+		 * that we ignore the quiesce state, since we are
+		 * concerned about the actual device state.
+		 */
+		if (!idev)
+			status = SAS_DEVICE_UNKNOWN;
+		else
+			status = SAS_ABORTED_TASK;
+		break;
+
+	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+		isci_request_handle_controller_specific_errors(idev, request,
+							       task, &response,
+							       &status);
+		break;
+
+	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+		/* This is a special case, in that the I/O completion
+		 * is telling us that the device needs a reset.
+		 * In order for the device reset condition to be
+		 * noticed, the I/O has to be handled in the error
+		 * handler.  Set the reset flag and cause the
+		 * SCSI error thread to be scheduled.
+		 */
+		spin_lock_irqsave(&task->task_state_lock, task_flags);
+		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+		spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+		/* Fail the I/O. */
+		response = SAS_TASK_UNDELIVERED;
+		status = SAM_STAT_TASK_ABORTED;
+
+		clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+
+	case SCI_FAILURE_RETRY_REQUIRED:
+
+		/* Fail the I/O so it can be retried. */
+		response = SAS_TASK_UNDELIVERED;
+		if (!idev)
+			status = SAS_DEVICE_UNKNOWN;
+		else
+			status = SAS_ABORTED_TASK;
+
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+
+
+	default:
+		/* Catch any otherwise unhandled error codes here. */
+		dev_dbg(&ihost->pdev->dev,
+			"%s: invalid completion code: 0x%x - "
+				"isci_request = %p\n",
+			__func__, completion_status, request);
+
+		response = SAS_TASK_UNDELIVERED;
+
+		/* See if the device has been/is being stopped. Note
+		 * that we ignore the quiesce state, since we are
+		 * concerned about the actual device state.
+		 */
+		if (!idev)
+			status = SAS_DEVICE_UNKNOWN;
+		else
+			status = SAS_ABORTED_TASK;
+
+		if (SAS_PROTOCOL_SMP == task->task_proto)
+			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		else
+			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+	}
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		if (task->data_dir == DMA_NONE)
+			break;
+		if (task->num_scatter == 0)
+			/* 0 indicates a single dma address */
+			dma_unmap_single(&ihost->pdev->dev,
+					 request->zero_scatter_daddr,
+					 task->total_xfer_len, task->data_dir);
+		else  /* unmap the sgl dma addresses */
+			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+				     request->num_sg_entries, task->data_dir);
+		break;
+	case SAS_PROTOCOL_SMP: {
+		struct scatterlist *sg = &task->smp_task.smp_req;
+		struct smp_req *smp_req;
+		void *kaddr;
+
+		dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+		/* need to swab it back in case the command buffer is re-used */
+		kaddr = kmap_atomic(sg_page(sg));
+		smp_req = kaddr + sg->offset;
+		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+		kunmap_atomic(kaddr);
+		break;
+	}
+	default:
+		break;
+	}
+
+	spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+	task->task_status.resp = response;
+	task->task_status.stat = status;
+
+	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+		/* Normal notification (task_done) */
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+	/* complete the io request to the core. */
+	sci_controller_complete_io(ihost, request->target_device, request);
+
+	/* Set the terminated flag so the request cannot be completed or
+	 * terminated again, and to cause any calls into abort
+	 * task to recognize the already completed case.
+	 */
+	set_bit(IREQ_TERMINATED, &request->flags);
+
+	ireq_done(ihost, request, task);
+}
+
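+/* On entry to the STARTED state, select the protocol-specific substate:
+ * task management, SMP, ATAPI, STP non-data, UDMA, or PIO. Fully
+ * accelerated SSP and NCQ requests have no substates.
+ */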
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+	struct domain_device *dev = ireq->target_device->domain_dev;
+	enum sci_base_request_states state;
+	struct sas_task *task;
+
+	/* XXX as hch said always creating an internal sas_task for tmf
+	 * requests would simplify the driver
+	 */
+	task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
+
+	/* all unaccelerated request types (non-ssp or non-ncq) are handled
+	 * with substates
+	 */
+	if (!task && dev->dev_type == SAS_END_DEVICE) {
+		state = SCI_REQ_TASK_WAIT_TC_COMP;
+	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+		state = SCI_REQ_SMP_WAIT_RESP;
+	} else if (task && sas_protocol_ata(task->task_proto) &&
+		   !task->ata_task.use_ncq) {
+		if (dev->sata_dev.class == ATA_DEV_ATAPI &&
+			task->ata_task.fis.command == ATA_CMD_PACKET) {
+			state = SCI_REQ_ATAPI_WAIT_H2D;
+		} else if (task->data_dir == DMA_NONE) {
+			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+		} else if (task->ata_task.dma_xfer) {
+			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+		} else /* PIO */ {
+			state = SCI_REQ_STP_PIO_WAIT_H2D;
+		}
+	} else {
+		/* SSP or NCQ are fully accelerated, no substates */
+		return;
+	}
+	sci_change_state(sm, state);
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+	struct isci_host *ihost = ireq->owning_controller;
+
+	/* Tell the SCI_USER that the IO request is complete */
+	if (!test_bit(IREQ_TMF, &ireq->flags))
+		isci_request_io_request_complete(ihost, ireq,
+						 ireq->sci_status);
+	else
+		isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+	/* Setting the abort bit in the Task Context is required by the silicon. */
+	ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+	ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+	ireq->target_device->working_request = ireq;
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+	[SCI_REQ_INIT] = { },
+	[SCI_REQ_CONSTRUCTED] = { },
+	[SCI_REQ_STARTED] = {
+		.enter_state = sci_request_started_state_enter,
+	},
+	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+	},
+	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+	[SCI_REQ_STP_PIO_WAIT_H2D] = {
+		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+	},
+	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+	[SCI_REQ_STP_PIO_DATA_IN] = { },
+	[SCI_REQ_STP_PIO_DATA_OUT] = { },
+	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
+	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
+	[SCI_REQ_SMP_WAIT_RESP] = { },
+	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
+	[SCI_REQ_ATAPI_WAIT_H2D] = { },
+	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
+	[SCI_REQ_ATAPI_WAIT_D2H] = { },
+	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
+	[SCI_REQ_COMPLETED] = {
+		.enter_state = sci_request_completed_state_enter,
+	},
+	[SCI_REQ_ABORTING] = {
+		.enter_state = sci_request_aborting_state_enter,
+	},
+	[SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+				   struct isci_remote_device *idev,
+				   struct isci_request *ireq)
+{
+	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+	ireq->target_device = idev;
+	ireq->protocol = SAS_PROTOCOL_NONE;
+	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+	ireq->sci_status   = SCI_SUCCESS;
+	ireq->scu_status   = 0;
+	ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+			  struct isci_remote_device *idev,
+			  struct isci_request *ireq)
+{
+	struct domain_device *dev = idev->domain_dev;
+	enum sci_status status = SCI_SUCCESS;
+
+	/* Build the common part of the request */
+	sci_general_request_construct(ihost, idev, ireq);
+
+	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+		return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+	if (dev->dev_type == SAS_END_DEVICE)
+		/* pass */;
+	else if (dev_is_sata(dev))
+		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+	else if (dev_is_expander(dev))
+		/* pass */;
+	else
+		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+	return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+					    struct isci_remote_device *idev,
+					    u16 io_tag, struct isci_request *ireq)
+{
+	struct domain_device *dev = idev->domain_dev;
+	enum sci_status status = SCI_SUCCESS;
+
+	/* Build the common part of the request */
+	sci_general_request_construct(ihost, idev, ireq);
+
+	if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
+		set_bit(IREQ_TMF, &ireq->flags);
+		memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+		/* Set the protocol indicator. */
+		if (dev_is_sata(dev))
+			ireq->protocol = SAS_PROTOCOL_STP;
+		else
+			ireq->protocol = SAS_PROTOCOL_SSP;
+	} else
+		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+	return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+	struct isci_request *request)
+{
+	enum sci_status status;
+
+	dev_dbg(&request->isci_host->pdev->dev,
+		"%s: request = %p\n",
+		__func__,
+		request);
+	status = sci_io_request_construct_basic_ssp(request);
+	return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct host_to_dev_fis *fis = &ireq->stp.cmd;
+	struct ata_queued_cmd *qc = task->uldd_task;
+	enum sci_status status;
+
+	dev_dbg(&ireq->isci_host->pdev->dev,
+		"%s: ireq = %p\n",
+		__func__,
+		ireq);
+
+	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+	if (!task->ata_task.device_control_reg_update)
+		fis->flags |= 0x80;
+	fis->flags &= 0xF0;
+
+	status = sci_io_request_construct_basic_sata(ireq);
+
+	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
+		fis->sector_count = qc->tag << 3;
+		ireq->tc->type.stp.ncq_tag = qc->tag;
+	}
+
+	return status;
+}
+
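+/* Build the task context for an SMP request: byte swap the request into
+ * SCU byte order, map it for DMA, and point the TC's command IU at the
+ * request body, skipping the first dword, which is copied into the TC
+ * itself.
+ */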
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+			      struct isci_request *ireq,
+			      struct sas_task *task)
+{
+	struct scatterlist *sg = &task->smp_task.smp_req;
+	struct isci_remote_device *idev;
+	struct scu_task_context *task_context;
+	struct isci_port *iport;
+	struct smp_req *smp_req;
+	void *kaddr;
+	u8 req_len;
+	u32 cmd;
+
+	kaddr = kmap_atomic(sg_page(sg));
+	smp_req = kaddr + sg->offset;
+	/*
+	 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
+	 * functions under SAS 2.0, a zero request length really indicates
+	 * a non-zero default length.
+	 */
+	if (smp_req->req_len == 0) {
+		switch (smp_req->func) {
+		case SMP_DISCOVER:
+		case SMP_REPORT_PHY_ERR_LOG:
+		case SMP_REPORT_PHY_SATA:
+		case SMP_REPORT_ROUTE_INFO:
+			smp_req->req_len = 2;
+			break;
+		case SMP_CONF_ROUTE_INFO:
+		case SMP_PHY_CONTROL:
+		case SMP_PHY_TEST_FUNCTION:
+			smp_req->req_len = 9;
+			break;
+			/* Default - zero is a valid default for 2.0. */
+		}
+	}
+	req_len = smp_req->req_len;
+	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+	cmd = *(u32 *) smp_req;
+	kunmap_atomic(kaddr);
+
+	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+		return SCI_FAILURE;
+
+	ireq->protocol = SAS_PROTOCOL_SMP;
+
+	/* the request was byte swapped above via sci_swab32_cpy() */
+
+	task_context = ireq->tc;
+
+	idev = ireq->target_device;
+	iport = idev->owning_port;
+
+	/*
+	 * Fill in the TC with its required data
+	 * 00h
+	 */
+	task_context->priority = 0;
+	task_context->initiator_request = 1;
+	task_context->connection_rate = idev->connection_rate;
+	task_context->protocol_engine_index = ISCI_PEG;
+	task_context->logical_port_index = iport->physical_port_index;
+	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+	task_context->abort = 0;
+	task_context->valid = SCU_TASK_CONTEXT_VALID;
+	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+	/* 04h */
+	task_context->remote_node_index = idev->rnc.remote_node_index;
+	task_context->command_code = 0;
+	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+	/* 08h */
+	task_context->link_layer_control = 0;
+	task_context->do_not_dma_ssp_good_response = 1;
+	task_context->strict_ordering = 0;
+	task_context->control_frame = 1;
+	task_context->timeout_enable = 0;
+	task_context->block_guard_enable = 0;
+
+	/* 0ch */
+	task_context->address_modifier = 0;
+
+	/* 10h */
+	task_context->ssp_command_iu_length = req_len;
+
+	/* 14h */
+	task_context->transfer_length_bytes = 0;
+
+	/*
+	 * 18h ~ 30h, protocol specific
+	 * since the command IU has been built by the framework at this point,
+	 * we just copy the first DWord from the command IU to this location. */
+	memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+	/*
+	 * 40h
+	 * "For SMP you could program it to zero. We would prefer that way
+	 * so that done code will be consistent." - Venki
+	 */
+	task_context->task_phase = 0;
+
+	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+			       (iport->physical_port_index <<
+				SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+			      ISCI_TAG_TCI(ireq->io_tag));
+	/*
+	 * Copy the physical address of the command buffer to the SCU Task
+	 * Context; the command buffer should not contain the command header,
+	 * hence the sizeof(u32) offset on the lower address below.
+	 */
+	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+	/* SMP response comes as UF, so no need to set response IU address. */
+	task_context->response_iu_upper = 0;
+	task_context->response_iu_lower = 0;
+
+	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+	return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ *    request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or a specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+	struct sas_task *task = isci_request_access_task(ireq);
+	struct device *dev = &ireq->isci_host->pdev->dev;
+	enum sci_status status = SCI_FAILURE;
+
+	status = sci_io_request_construct_smp(dev, ireq, task);
+	if (status != SCI_SUCCESS)
+		dev_dbg(&ireq->isci_host->pdev->dev,
+			 "%s: failed with status = %d\n",
+			 __func__,
+			 status);
+
+	return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ *    request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ *    object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or a specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+					     struct isci_request *request,
+					     struct isci_remote_device *idev)
+{
+	enum sci_status status = SCI_SUCCESS;
+	struct sas_task *task = isci_request_access_task(request);
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: idev = 0x%p; request = %p, "
+		"num_scatter = %d\n",
+		__func__,
+		idev,
+		request,
+		task->num_scatter);
+
+	/* map the sgl addresses, if present.
+	 * libata does the mapping for sata devices
+	 * before we get the request.
+	 */
+	if (task->num_scatter &&
+	    !sas_protocol_ata(task->task_proto) &&
+	    !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+		request->num_sg_entries = dma_map_sg(
+			&ihost->pdev->dev,
+			task->scatter,
+			task->num_scatter,
+			task->data_dir
+			);
+
+		if (request->num_sg_entries == 0)
+			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+	}
+
+	status = sci_io_request_construct(ihost, idev, request);
+
+	if (status != SCI_SUCCESS) {
+		dev_dbg(&ihost->pdev->dev,
+			 "%s: failed request construct\n",
+			 __func__);
+		return SCI_FAILURE;
+	}
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		status = isci_smp_request_build(request);
+		break;
+	case SAS_PROTOCOL_SSP:
+		status = isci_request_ssp_request_construct(request);
+		break;
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+		status = isci_request_stp_request_construct(request);
+		break;
+	default:
+		dev_dbg(&ihost->pdev->dev,
+			 "%s: unknown protocol\n", __func__);
+		return SCI_FAILURE;
+	}
+
+	return SCI_SUCCESS;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+	struct isci_request *ireq;
+
+	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+	ireq->io_tag = tag;
+	ireq->io_request_completion = NULL;
+	ireq->flags = 0;
+	ireq->num_sg_entries = 0;
+
+	return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+						     struct sas_task *task,
+						     u16 tag)
+{
+	struct isci_request *ireq;
+
+	ireq = isci_request_from_tag(ihost, tag);
+	ireq->ttype_ptr.io_task_ptr = task;
+	clear_bit(IREQ_TMF, &ireq->flags);
+	task->lldd_task = ireq;
+
+	return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+					       struct isci_tmf *isci_tmf,
+					       u16 tag)
+{
+	struct isci_request *ireq;
+
+	ireq = isci_request_from_tag(ihost, tag);
+	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+	set_bit(IREQ_TMF, &ireq->flags);
+
+	return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, u16 tag)
+{
+	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+	struct isci_request *ireq;
+	unsigned long flags;
+	int ret = 0;
+
+	/* do common allocation and init of request object. */
+	ireq = isci_io_request_from_tag(ihost, task, tag);
+
+	status = isci_io_request_build(ihost, ireq, idev);
+	if (status != SCI_SUCCESS) {
+		dev_dbg(&ihost->pdev->dev,
+			 "%s: request_construct failed - status = 0x%x\n",
+			 __func__,
+			 status);
+		return status;
+	}
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+		if (isci_task_is_ncq_recovery(task)) {
+
+			/* The device is in an NCQ recovery state.  Issue the
+			 * request on the task side.  Note that it will
+			 * complete on the I/O request side because the
+			 * request was built that way (i.e. the
+			 * IREQ_TMF flag is not set).
+			 */
+			status = sci_controller_start_task(ihost,
+							    idev,
+							    ireq);
+		} else {
+			status = SCI_FAILURE;
+		}
+	} else {
+		/* send the request; the I/O tag was already assigned above. */
+		status = sci_controller_start_io(ihost, idev,
+						  ireq);
+	}
+
+	if (status != SCI_SUCCESS &&
+	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+		dev_dbg(&ihost->pdev->dev,
+			 "%s: failed request start (0x%x)\n",
+			 __func__, status);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		return status;
+	}
+	/* Either I/O started OK, or the core has signaled that
+	 * the device needs a target reset.
+	 */
+	if (status != SCI_SUCCESS) {
+		/* The request did not really start in the
+		 * hardware, so clear the request handle
+		 * here so no terminations will be done.
+		 */
+		set_bit(IREQ_TERMINATED, &ireq->flags);
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+		/* Signal libsas that we need the SCSI error
+		 * handler thread to work on this I/O and that
+		 * we want a device reset.
+		 */
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+		/* Cause this task to be scheduled in the SCSI error
+		 * handler thread.
+		 */
+		sas_task_abort(task);
+
+		/* Change the status, since we are holding
+		 * the I/O until it is managed by the SCSI
+		 * error handler.
+		 */
+		status = SCI_SUCCESS;
+	}
+
+	return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 0000000..aff9531
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,310 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
+ * @pio_len: number of bytes requested at PIO setup
+ * @status: pio setup ending status value to tell us if we need
+ *	     to wait for another fis or if the transfer is complete.  Upon
+ *           receipt of a d2h fis this will be the status field of that fis.
+ * @sgl: track pio transfer progress as we iterate through the sgl
+ */
+struct isci_stp_request {
+	u32 pio_len;
+	u8 status;
+
+	struct isci_stp_pio_sgl {
+		int index;
+		u8 set;
+		u32 offset;
+	} sgl;
+};
+
+struct isci_request {
+	#define IREQ_COMPLETE_IN_TARGET 0
+	#define IREQ_TERMINATED 1
+	#define IREQ_TMF 2
+	#define IREQ_ACTIVE 3
+	#define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+	#define IREQ_TC_ABORT_POSTED 5
+	#define IREQ_ABORT_PATH_ACTIVE 6
+	#define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
+	unsigned long flags;
+	/* XXX kill ttype and ttype_ptr, allocate full sas_task */
+	union ttype_ptr_union {
+		struct sas_task *io_task_ptr;   /* When ttype==io_task  */
+		struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
+	} ttype_ptr;
+	struct isci_host *isci_host;
+	dma_addr_t request_daddr;
+	dma_addr_t zero_scatter_daddr;
+	unsigned int num_sg_entries;
+	/* Note: "io_request_completion" is completed in two different ways
+	 * depending on whether this is a TMF or regular request.
+	 * - TMF requests are completed in the thread that started them;
+	 * - regular requests are completed in the request completion callback
+	 *   function.
+	 * This difference in operation allows the aborter of a TMF request
+	 * to be sure that once the TMF request completes, the I/O that the
+	 * TMF was aborting is guaranteed to have completed.
+	 *
+	 * XXX kill io_request_completion
+	 */
+	struct completion *io_request_completion;
+	struct sci_base_state_machine sm;
+	struct isci_host *owning_controller;
+	struct isci_remote_device *target_device;
+	u16 io_tag;
+	enum sas_protocol protocol;
+	u32 scu_status; /* hardware result */
+	u32 sci_status; /* upper layer disposition */
+	u32 post_context;
+	struct scu_task_context *tc;
+	/* could be larger with sg chaining */
+	#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
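+	/* each scu_sgl_element_pair holds two SGL elements, so (N + 1) / 2
+	 * pairs cover SCI_MAX_SCATTER_GATHER_ELEMENTS entries, rounding up
+	 */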
+	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+	/* This field holds the index of the saved rx frame data.  It is used
+	 * in STP internal requests and SMP response frames.  If this field is
+	 * not SCU_INVALID_FRAME_INDEX, the saved frame must be released on IO
+	 * request completion.
+	 */
+	u32 saved_rx_frame_index;
+
+	union {
+		struct {
+			union {
+				struct ssp_cmd_iu cmd;
+				struct ssp_task_iu tmf;
+			};
+			union {
+				struct ssp_response_iu rsp;
+				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+			};
+		} ssp;
+		struct {
+			struct isci_stp_request req;
+			struct host_to_dev_fis cmd;
+			struct dev_to_host_fis rsp;
+		} stp;
+	};
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+	struct isci_request *ireq;
+
+	ireq = container_of(stp_req, typeof(*ireq), stp.req);
+	return ireq;
+}
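+
+/*
+ * Example: STP/PIO handlers that are passed a struct isci_stp_request
+ * can recover the enclosing request:
+ *
+ *	struct isci_request *ireq = to_ireq(stp_req);
+ */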
+
+/**
+ * enum sci_base_request_states - request state machine states
+ *
+ * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
+ *
+ * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
+ * constructed.  This state is entered from the INITIAL state.
+ *
+ * @SCI_REQ_STARTED: This state indicates that the request has been started.
+ * This state is entered from the CONSTRUCTED state.
+ *
+ * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ * @SCI_REQ_STP_UDMA_WAIT_D2H:
+ * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ *
+ * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
+ * waiting for either a PIO Setup FIS or a D2H register FIS.  The type of frame
+ * received is based on the result of the prior frame and line conditions.
+ *
+ * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
+ * waiting for a DATA frame from the device.
+ *
+ * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
+ * waiting to transmit the next data frame to the device.
+ *
+ * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
+ * waiting for a PIO Setup FIS.
+ *
+ * @SCI_REQ_ATAPI_WAIT_D2H: Non-data IO requests transition to this state
+ * after receiving a TC completion.  While in this state the IO request
+ * object is waiting for the D2H status frame as an unsolicited frame.
+ *
+ * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
+ * task context completion after every frame submission, so in the
+ * non-accelerated case we need to expect the completion for the "cdb" frame.
+ *
+ * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started raw task management request is waiting for the transmission of
+ * the initial frame (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
+ * management request is waiting for the reception of an unsolicited frame
+ * (i.e.  response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP
+ * request is waiting for the reception of an unsolicited frame
+ * (i.e.  response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started SMP request is waiting for the transmission of the initial frame
+ * (i.e.  command, task, etc.).
+ *
+ * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered from the
+ * ABORTING state.
+ *
+ * @SCI_REQ_ABORTING: This state indicates that the request is in the process
+ * of being terminated/aborted.  This state is entered from the CONSTRUCTED
+ * state.  This state is entered from the STARTED state.
+ *
+ * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
+ */
+#define REQUEST_STATES {\
+	C(REQ_INIT),\
+	C(REQ_CONSTRUCTED),\
+	C(REQ_STARTED),\
+	C(REQ_STP_UDMA_WAIT_TC_COMP),\
+	C(REQ_STP_UDMA_WAIT_D2H),\
+	C(REQ_STP_NON_DATA_WAIT_H2D),\
+	C(REQ_STP_NON_DATA_WAIT_D2H),\
+	C(REQ_STP_PIO_WAIT_H2D),\
+	C(REQ_STP_PIO_WAIT_FRAME),\
+	C(REQ_STP_PIO_DATA_IN),\
+	C(REQ_STP_PIO_DATA_OUT),\
+	C(REQ_ATAPI_WAIT_H2D),\
+	C(REQ_ATAPI_WAIT_PIO_SETUP),\
+	C(REQ_ATAPI_WAIT_D2H),\
+	C(REQ_ATAPI_WAIT_TC_COMP),\
+	C(REQ_TASK_WAIT_TC_COMP),\
+	C(REQ_TASK_WAIT_TC_RESP),\
+	C(REQ_SMP_WAIT_RESP),\
+	C(REQ_SMP_WAIT_TC_COMP),\
+	C(REQ_COMPLETED),\
+	C(REQ_ABORTING),\
+	C(REQ_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_base_request_states REQUEST_STATES;
+#undef C
+const char *req_state_name(enum sci_base_request_states state);
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+				  u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+				  u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+	char *requested_addr = (char *)virt_addr;
+	char *base_addr = (char *)ireq;
+
+	BUG_ON(requested_addr < base_addr);
+	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+	return ireq->request_daddr + (requested_addr - base_addr);
+}
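+
+/*
+ * Example: the bus address of the embedded SSP response IU can be
+ * obtained with:
+ *
+ *	dma_addr_t dma = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+ */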
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+					       struct isci_tmf *isci_tmf,
+					       u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+			 struct sas_task *task, u16 tag);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+			    struct isci_remote_device *idev,
+			    u16 io_tag,
+			    struct isci_request *ireq);
+enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+	return (sas_protocol_ata(task->task_proto) &&
+		task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+		task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+}
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 0000000..dc26b4a
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,217 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types.  These constants depict the various SATA FIS types defined
+ * in the serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D          0x27
+#define FIS_REGD2H          0x34
+#define FIS_SETDEVBITS      0xA1
+#define FIS_DMA_ACTIVATE    0x39
+#define FIS_DMA_SETUP       0x41
+#define FIS_BIST_ACTIVATE   0x58
+#define FIS_PIO_SETUP       0x5F
+#define FIS_DATA            0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE	280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+	u8 LUN[8];
+	u8 add_cdb_len:6;
+	u8 _r_a:2;
+	u8 _r_b;
+	u8 en_fburst:1;
+	u8 task_prio:4;
+	u8 task_attr:3;
+	u8 _r_c;
+
+	u8 cdb[16];
+}  __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+	u8 LUN[8];
+	u8 _r_a;
+	u8 task_func;
+	u8 _r_b[4];
+	u16 task_tag;
+	u8 _r_c[12];
+}  __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ *    an SMP Request that is comprised of the struct smp_request_header and a
+ *    phy identifier.
+ *    Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+	u8 _r_a[4];		/* bytes 4-7 */
+
+	u8 ign_zone_grp:1;	/* byte 8 */
+	u8 _r_b:7;
+
+	u8 phy_id;		/* byte 9 */
+	u8 _r_c;		/* byte 10 */
+	u8 _r_d;		/* byte 11 */
+}  __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ *    contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+	u16 exp_change_cnt;		/* bytes 4-5 */
+	u8 exp_rt_idx_hi;		/* byte 6 */
+	u8 exp_rt_idx;			/* byte 7 */
+
+	u8 _r_a;			/* byte 8 */
+	u8 phy_id;			/* byte 9 */
+	u16 _r_b;			/* bytes 10-11 */
+
+	u8 _r_c:7;			/* byte 12 */
+	u8 dis_rt_entry:1;
+	u8 _r_d[3];			/* bytes 13-15 */
+
+	u8 rt_sas_addr[8];		/* bytes 16-23 */
+	u8 _r_e[16];			/* bytes 24-39 */
+}  __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ *    SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+	u16 exp_change_cnt;		/* byte 4-5 */
+
+	u8 _r_a[3];			/* bytes 6-8 */
+
+	u8 phy_id;			/* byte 9 */
+	u8 phy_op;			/* byte 10 */
+
+	u8 upd_pathway:1;		/* byte 11 */
+	u8 _r_b:7;
+
+	u8 _r_c[12];			/* byte 12-23 */
+
+	u8 att_dev_name[8];             /* byte 24-31 */
+
+	u8 _r_d:4;			/* byte 32 */
+	u8 min_linkrate:4;
+
+	u8 _r_e:4;			/* byte 33 */
+	u8 max_linkrate:4;
+
+	u8 _r_f[2];			/* byte 34-35 */
+
+	u8 pathway:4;			/* byte 36 */
+	u8 _r_g:4;
+
+	u8 _r_h[3];			/* bytes 37-39 */
+}  __packed;
+
+/*
+ * struct smp_req - This structure simply unionizes the existing request
+ *    structures into a common request type.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+	u8 type;		/* byte 0 */
+	u8 func;		/* byte 1 */
+	u8 alloc_resp_len;	/* byte 2 */
+	u8 req_len;		/* byte 3 */
+	u8 req_data[0];
+}  __packed;
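+
+/*
+ * Example: a DISCOVER request is built with type SMP_REQUEST (0x40) and
+ * func SMP_DISCOVER (0x10) from <scsi/sas.h>, with a req_len of 2 dwords
+ * (or 0 for SAS 1.x initiators; see sci_io_request_construct_smp()).
+ */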
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ *    represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+	u32 high;
+	u32 low;
+};
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 0000000..071cb74
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,285 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ *    codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT      28
+#define SCU_COMPLETION_TYPE_MASK       0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+	((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * These macros define the individual SCU completion types.
+ */
+#define SCU_COMPLETION_TYPE_TASK       SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA       SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI        SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT      SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY     SCU_COMPLETION_TYPE(4)
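+
+/*
+ * Example: SCU_COMPLETION_TYPE_SDMA expands to ((u32)1 << 28) == 0x10000000,
+ * which matches the type field extracted by SCU_GET_COMPLETION_TYPE().
+ */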
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK       0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK    0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT   22
+#define SCU_COMPLETION_SDMA_STATUS_MASK  0x003C0000
+#define SCU_COMPLETION_PEG_MASK          0x00010000
+#define SCU_COMPLETION_PORT_MASK         0x00007000
+#define SCU_COMPLETION_PE_MASK           SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT          12
+#define SCU_COMPLETION_INDEX_MASK        0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+	((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+	((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+	((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a completion code that can be compared to
+ * the result of SCU_GET_COMPLETION_TL_STATUS().
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+	((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+	(\
+		((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+		>> SCU_COMPLETION_TL_STATUS_SHIFT \
+	)
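+
+/*
+ * Example: a raw completion code is typically compared against a
+ * normalized constant:
+ *
+ *	if (SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
+ *	    SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
+ *		... transport layer reported success ...
+ */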
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code)	\
+	((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code)	\
+	((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+	((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+	(((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+	((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK     0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT    16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+	(\
+		((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+		>> SCU_UNSOLICITED_FRAME_SHIFT \
+	)
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK  0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+	((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match it with the hardware completion code. In a 16-bit compiler,
+ * immediate constants are 16-bit values (the size of an int). If we shift
+ * those by 18 bits, we completely lose the value. To ensure the value is a
+ * 32-bit value like we want, each immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD                                  ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR                        ((u32)0x08)
+#define SCU_TASK_DONE_CRC_ERR                               ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE                        ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE                          ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR                           ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR                          ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR                              ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO                            ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR                               ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM                            ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR                               ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM                            ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR                          ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM                            ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD                            ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR                          ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE                          ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR                              ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE                          ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR                         ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN                           ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN                             ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR                              ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR                            ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR                           ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA                           ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR                                ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS                             ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP                            ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP                            ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR                       ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR                       ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR                           ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR                           ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR                      ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR                         ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR                          ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA                            ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL                             ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS                          ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR                               ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR                               ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT                            ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR                          ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR                      ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION              ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1             ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2             ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3             ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION                ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION                 ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV                         ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV                          ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND                        ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY             ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED         ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED  ((u32)0x3F)
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 0000000..36a945a
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT      24
+#define SCU_EVENT_TYPE_CODE_MASK       0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT  18
+#define SCU_EVENT_SPECIFIC_CODE_MASK   0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+	(SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+	((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+	((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and an SCU event specifier
+ * from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+	((type) | SCU_EVENT_SPECIFIC(code))
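+
+/*
+ * Example: SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16) evaluates
+ * to (0x03 << 24) | (0x16 << 18) == 0x03580000; see SCU_EVENT_LINK_FAILURE
+ * below.
+ */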
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR  SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR      SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR          SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR    SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE   SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT         SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX     SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX  SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC       SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT      SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE    0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR	\
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES	\
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN	\
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX	\
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+	((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+	((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+	((event_code) & SCU_EVENT_CODE_MASK)
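+
+/*
+ * Example: event handlers typically dispatch on the masked type first:
+ *
+ *	switch (scu_get_event_type(event_code)) {
+ *	case SCU_EVENT_TYPE_OSSP_EVENT:
+ *		...
+ *	}
+ */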
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+	scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT	\
+	scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+	scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 0000000..33745ad
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ *    describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ *    definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+	/* WORD 0 */
+
+	/**
+	 * This field is the remote node index assigned for this remote node. All
+	 * remote nodes must have a unique remote node index. The value of the remote
+	 * node index can not exceed the maximum number of remote nodes reported in
+	 * the SCU device context capacity register.
+	 */
+	u32 remote_node_index:12;
+	u32 reserved0_1:4;
+
+	/**
+	 * This field tells the SCU hardware how many simultaneous connections
+	 * this remote node will support.
+	 */
+	u32 remote_node_port_width:4;
+
+	/**
+	 * This field tells the SCU hardware which logical port to associate with this
+	 * remote node.
+	 */
+	u32 logical_port_index:3;
+	u32 reserved0_2:5;
+
+	/**
+	 * This field will enable the I_T nexus loss timer for this remote node.
+	 */
+	u32 nexus_loss_timer_enable:1;
+
+	/**
+	 * This field is for driver debug only and is not used.
+	 */
+	u32 check_bit:1;
+
+	/**
+	 * This field must be set to true when the hardware DMAs the remote node
+	 * context to the hardware SRAM.  When the remote node is being invalidated
+	 * this field must be set to false.
+	 */
+	u32 is_valid:1;
+
+	/**
+	 * This field must be set to true.
+	 */
+	u32 is_remote_node_context:1;
+
+	/* WORD 1 - 2 */
+
+	/**
+	 * This is the low word of the remote device SAS Address
+	 */
+	u32 remote_sas_address_lo;
+
+	/**
+	 * This field is the high word of the remote device SAS Address
+	 */
+	u32 remote_sas_address_hi;
+
+	/* WORD 3 */
+	/**
+	 * This field represents the function number assigned to this remote device.
+	 * This value must match the virtual function number that is being used to
+	 * communicate to the device.
+	 */
+	u32 function_number:8;
+	u32 reserved3_1:8;
+
+	/**
+	 * This field provides the driver a way to cheat on the arbitration wait time
+	 * for this remote node.
+	 */
+	u32 arbitration_wait_time:16;
+
+	/* WORD 4 */
+	/**
+	 * This field tells the SCU hardware how long this device may occupy the
+	 * connection before it must be closed.
+	 */
+	u32 connection_occupancy_timeout:16;
+
+	/**
+	 * This field tells the SCU hardware how long to maintain a connection when
+	 * there are no frames being transmitted on the link.
+	 */
+	u32 connection_inactivity_timeout:16;
+
+	/* WORD  5 */
+	/**
+	 * This field allows the driver to cheat on the arbitration wait time for this
+	 * remote node.
+	 */
+	u32 initial_arbitration_wait_time:16;
+
+	/**
+	 * This field tells the hardware what to program for the connection rate in
+	 * the open address frame.  See the SAS spec for valid values.
+	 */
+	u32 oaf_connection_rate:4;
+
+	/**
+	 * This field tells the SCU hardware what to program for the features in the
+	 * open address frame.  See the SAS spec for valid values.
+	 */
+	u32 oaf_features:4;
+
+	/**
+	 * This field tells the SCU hardware what to use for the source zone group in
+	 * the open address frame.  See the SAS spec for more details on zoning.
+	 */
+	u32 oaf_source_zone_group:8;
+
+	/* WORD 6 */
+	/**
+	 * This field tells the SCU hardware what to use for the more compatibility
+	 * features field in the open address frame.  See the SAS spec for details.
+	 */
+	u32 oaf_more_compatibility_features;
+
+	/* WORD 7 */
+	u32 reserved7;
+
+};
+
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ *    definition for an STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+	/**
+	 * Placeholder data for the STP remote node.
+	 */
+	u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+	/**
+	 * SSP Remote Node
+	 */
+	struct ssp_remote_node_context ssp;
+
+	/**
+	 * STP Remote Node
+	 */
+	struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 0000000..869a979
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,965 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ *    context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ *    types the SCU hardware will accept. Their full definitions can be
+ *    found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+	SCU_TASK_TYPE_IOREAD,           /* IO READ direction or no direction */
+	SCU_TASK_TYPE_IOWRITE,          /* IO Write direction */
+	SCU_TASK_TYPE_SMP_REQUEST,      /* SMP Request type */
+	SCU_TASK_TYPE_RESPONSE,         /* Driver generated response frame (target mode) */
+	SCU_TASK_TYPE_RAW_FRAME,        /* Raw frame request type */
+	SCU_TASK_TYPE_PRIMITIVE         /* Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ *    types the SCU hardware will accept. Their full definitions can be
+ *    found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+	SCU_TASK_TYPE_DMA_IN,           /* Read request */
+	SCU_TASK_TYPE_FPDMAQ_READ,      /* NCQ read request */
+	SCU_TASK_TYPE_PACKET_DMA_IN,    /* Packet read request */
+	SCU_TASK_TYPE_SATA_RAW_FRAME,   /* Raw frame request */
+	RESERVED_4,
+	RESERVED_5,
+	RESERVED_6,
+	RESERVED_7,
+	SCU_TASK_TYPE_DMA_OUT,          /* Write request */
+	SCU_TASK_TYPE_FPDMAQ_WRITE,     /* NCQ write request */
+	SCU_TASK_TYPE_PACKET_DMA_OUT    /* Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE  0
+#define SCU_RNC_CONTEXT_TYPE   1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID          0
+#define SCU_TASK_CONTEXT_VALID            1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK   0
+#define SCU_COMMAND_CODE_ACTIVE_TASK          1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK   2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES    3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when no specific priority is requested for this request.
+ */
+#define SCU_TASK_PRIORITY_NORMAL          0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue.  The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q       1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH            2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED        3
+
+#define SCU_TASK_INITIATOR_MODE           1
+#define SCU_TASK_TARGET_MODE              0
+
+#define SCU_TASK_REGULAR                  0
+#define SCU_TASK_ABORTED                  1
+
+/* direction bit definition */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION     0
+#define SCU_SATA_READ_DATA_DIRECTION      1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT           21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK            0x00E00000
+#define scu_get_command_request_type(x)	\
+	((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT        18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK         0x001C0000
+#define scu_get_command_request_subtype(x) \
+	((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK	 \
+	(\
+		SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK		  \
+		| SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK	    \
+	)
+#define scu_get_command_request_full_type(x) \
+	((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT  16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK   0x00010000
+#define scu_get_command_protocl_engine_group(x)	\
+	((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT           12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK            0x00007000
+#define scu_get_command_reqeust_logical_port(x)	\
+	((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
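+/*
+ * Worked example (illustrative only, not part of the hardware definition):
+ * for a posted command word of 0x00480123, scu_get_command_request_type()
+ * yields 0x00400000 (type 2, POST_RNC) and
+ * scu_get_command_request_subtype() yields 0x00080000 (subtype 2, an RNC
+ * invalidate), while the low 12 bits (0x123) carry the posted index.
+ */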
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+	((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC  MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command)	\
+	((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE	\
+	(MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
+
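+/*
+ * Sketch of how the request values above compose (illustrative only):
+ * SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE is POST_RNC (type 2) with
+ * subtype 2, i.e. (2 << 21) | (2 << 18) == 0x00480000.
+ */
+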
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL SCU task context protocol types; these are used
+ * to program the SCU task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP    0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP    0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP    0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE   0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ *    request.
+ *
+ *
+ */
+struct ssp_task_context {
+	/* OFFSET 0x18 */
+	u32 reserved00:24;
+	u32 frame_type:8;
+
+	/* OFFSET 0x1C */
+	u32 reserved01;
+
+	/* OFFSET 0x20 */
+	u32 fill_bytes:2;
+	u32 reserved02:6;
+	u32 changing_data_pointer:1;
+	u32 retransmit:1;
+	u32 retry_data_frame:1;
+	u32 tlr_control:2;
+	u32 reserved03:19;
+
+	/* OFFSET 0x24 */
+	u32 uiRsvd4;
+
+	/* OFFSET 0x28 */
+	u32 target_port_transfer_tag:16;
+	u32 tag:16;
+
+	/* OFFSET 0x2C */
+	u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ *    request.
+ *
+ *
+ */
+struct stp_task_context {
+	/* OFFSET 0x18 */
+	u32 fis_type:8;
+	u32 pm_port:4;
+	u32 reserved0:3;
+	u32 control:1;
+	u32 command:8;
+	u32 features:8;
+
+	/* OFFSET 0x1C */
+	u32 reserved1;
+
+	/* OFFSET 0x20 */
+	u32 reserved2;
+
+	/* OFFSET 0x24 */
+	u32 reserved3;
+
+	/* OFFSET 0x28 */
+	u32 ncq_tag:5;
+	u32 reserved4:27;
+
+	/* OFFSET 0x2C */
+	u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ *    request.
+ *
+ *
+ */
+struct smp_task_context {
+	/* OFFSET 0x18 */
+	u32 response_length:8;
+	u32 function_result:8;
+	u32 function:8;
+	u32 frame_type:8;
+
+	/* OFFSET 0x1C */
+	u32 smp_response_ufi:12;
+	u32 reserved1:20;
+
+	/* OFFSET 0x20 */
+	u32 reserved2;
+
+	/* OFFSET 0x24 */
+	u32 reserved3;
+
+	/* OFFSET 0x28 */
+	u32 reserved4;
+
+	/* OFFSET 0x2C */
+	u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ *    when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+	/* OFFSET 0x18 */
+	/**
+	 * This field is the control word and it must be 0.
+	 */
+	u32 control; /* must be set to 0 */
+
+	/* OFFSET 0x1C */
+	/**
+	 * This field specifies the primitive that is to be transmitted.
+	 */
+	u32 sequence;
+
+	/* OFFSET 0x20 */
+	u32 reserved0;
+
+	/* OFFSET 0x24 */
+	u32 reserved1;
+
+	/* OFFSET 0x28 */
+	u32 reserved2;
+
+	/* OFFSET 0x2C */
+	u32 reserved3;
+};
+
+/**
+ * The union of the protocol-specific layouts that can be selected in the
+ *    SCU task context.  It spans task context words 0x18-0x2C (six dwords).
+ *
+ * protocol_context
+ */
+union protocol_context {
+	struct ssp_task_context ssp;
+	struct stp_task_context stp;
+	struct smp_task_context smp;
+	struct primitive_task_context primitive;
+	u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ *    element. SCU SGLs contain a 64-bit address, and the maximum data
+ *    transfer is a 24-bit byte count.  The SGL cannot cross a 4GB boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+	/**
+	 * This field is the upper 32 bits of the 64 bit physical address.
+	 */
+	u32 address_upper;
+
+	/**
+	 * This field is the lower 32 bits of the 64 bit physical address.
+	 */
+	u32 address_lower;
+
+	/**
+	 * This field is the number of bytes to transfer.
+	 */
+	u32 length:24;
+
+	/**
+	 * This field is the address modifier to be used when a virtual function is
+	 * requesting a data transfer.
+	 */
+	u32 address_modifier:8;
+
+};
+
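+/*
+ * Minimal illustrative sketch (hypothetical helper, not used anywhere in
+ * the driver) of how a 64-bit DMA address maps onto an SGL element:
+ *
+ *	sgl->address_upper    = upper_32_bits(dma_addr);
+ *	sgl->address_lower    = lower_32_bits(dma_addr);
+ *	sgl->length           = nbytes;   (at most (1 << 24) - 1)
+ *	sgl->address_modifier = 0;        (no virtual function in use)
+ */
+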
+#define SCU_SGL_ELEMENT_PAIR_A   0
+#define SCU_SGL_ELEMENT_PAIR_B   1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ *    of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ *    They are referred to in the DS specification as SGL A and SGL B.  Each SGL
+ *    pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+	/* OFFSET 0x60-0x68 */
+	/**
+	 * This field is the SGL element A of the SGL pair.
+	 */
+	struct scu_sgl_element A;
+
+	/* OFFSET 0x6C-0x74 */
+	/**
+	 * This field is the SGL element B of the SGL pair.
+	 */
+	struct scu_sgl_element B;
+
+	/* OFFSET 0x78-0x7C */
+	/**
+	 * This field is the upper 32 bits of the 64 bit address to the next SGL
+	 * element pair.
+	 */
+	u32 next_pair_upper;
+
+	/**
+	 * This field is the lower 32 bits of the 64 bit address to the next SGL
+	 * element pair.
+	 */
+	u32 next_pair_lower;
+
+};
+
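+/*
+ * Note (a sketch of the convention implied by this layout, not a statement
+ * of the full hardware contract): scatter lists longer than two elements
+ * are chained by pointing next_pair_upper/next_pair_lower at the physical
+ * address of the following scu_sgl_element_pair.
+ */
+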
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ *    for the task context. This is set to 0 by the driver but can be read by
+ *    issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+	/* OFFSET 0x48 */
+	u32 xfer_rdy_write_data_length;
+
+	/* OFFSET 0x4C */
+	u32 data_offset;
+
+	/* OFFSET 0x50 */
+	u32 data_transfer_size:24;
+	u32 reserved_50_0:8;
+
+	/* OFFSET 0x54 */
+	u32 next_initiator_write_data_offset;
+
+	/* OFFSET 0x58 */
+	u32 next_initiator_write_data_xfer_size:24;
+	u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ *    silicon task context. It lays out all of the fields according to the
+ *    expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+	/* OFFSET 0x00 ------ */
+	/**
+	 * This field must be encoded to one of the valid SCU task priority values
+	 *    - SCU_TASK_PRIORITY_NORMAL
+	 *    - SCU_TASK_PRIORITY_HEAD_OF_Q
+	 *    - SCU_TASK_PRIORITY_HIGH
+	 */
+	u32 priority:2;
+
+	/**
+	 * This field must be set to true if this is an initiator generated request.
+	 * Until target mode is supported all task requests are initiator requests.
+	 */
+	u32 initiator_request:1;
+
+	/**
+	 * This field must be set to one of the valid connection rates; valid
+	 * values are 0x8, 0x9, and 0xA.
+	 */
+	u32 connection_rate:4;
+
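+	/*
+	 * Per the SAS specification, the connection rate encodings listed
+	 * above are: 0x8 = 1.5 Gbps, 0x9 = 3.0 Gbps and 0xA = 6.0 Gbps.
+	 */
+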
+	/**
+	 * This field must be programmed when generating an SMP response since the SMP
+	 * connection remains open until the SMP response is generated.
+	 */
+	u32 protocol_engine_index:3;
+
+	/**
+	 * This field must contain the logical port for the task request.
+	 */
+	u32 logical_port_index:3;
+
+	/**
+	 * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+	 *    - SCU_TASK_CONTEXT_PROTOCOL_SMP
+	 *    - SCU_TASK_CONTEXT_PROTOCOL_SSP
+	 *    - SCU_TASK_CONTEXT_PROTOCOL_STP
+	 *    - SCU_TASK_CONTEXT_PROTOCOL_NONE
+	 */
+	u32 protocol_type:3;
+
+	/**
+	 * This field must be set to the TCi allocated for this task.
+	 */
+	u32 task_index:12;
+
+	/**
+	 * This field is reserved and must be set to 0x00
+	 */
+	u32 reserved_00_0:1;
+
+	/**
+	 * For a normal task request this must be set to 0.  If this is an abort of
+	 * this task request it must be set to 1.
+	 */
+	u32 abort:1;
+
+	/**
+	 * This field must be set to true for the SCU hardware to process the task.
+	 */
+	u32 valid:1;
+
+	/**
+	 * This field must be set to SCU_TASK_CONTEXT_TYPE
+	 */
+	u32 context_type:1;
+
+	/* OFFSET 0x04 */
+	/**
+	 * This field contains the RNi that is the target of this request.
+	 */
+	u32 remote_node_index:12;
+
+	/**
+	 * This field is programmed if this is a mirrored request, which we are not
+	 * using, in which case it is the RNi for the mirrored target.
+	 */
+	u32 mirrored_node_index:12;
+
+	/**
+	 * This field is programmed with the direction of the SATA request
+	 *    - SCU_SATA_WRITE_DATA_DIRECTION
+	 *    - SCU_SATA_READ_DATA_DIRECTION
+	 */
+	u32 sata_direction:1;
+
+	/**
+	 * This field is programmed with one of the following SCU_COMMAND_CODE
+	 *    - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+	 *    - SCU_COMMAND_CODE_ACTIVE_TASK
+	 *    - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+	 *    - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+	 */
+	u32 command_code:2;
+
+	/**
+	 * This field is set to true if the remote node should be suspended.
+	 * This bit is only valid for SSP & SMP target devices.
+	 */
+	u32 suspend_node:1;
+
+	/**
+	 * This field is programmed with one of the following command type codes
+	 *
+	 * For SAS requests use the scu_ssp_task_type
+	 *    - SCU_TASK_TYPE_IOREAD
+	 *    - SCU_TASK_TYPE_IOWRITE
+	 *    - SCU_TASK_TYPE_SMP_REQUEST
+	 *    - SCU_TASK_TYPE_RESPONSE
+	 *    - SCU_TASK_TYPE_RAW_FRAME
+	 *    - SCU_TASK_TYPE_PRIMITIVE
+	 *
+	 * For SATA requests use the scu_sata_task_type
+	 *    - SCU_TASK_TYPE_DMA_IN
+	 *    - SCU_TASK_TYPE_FPDMAQ_READ
+	 *    - SCU_TASK_TYPE_PACKET_DMA_IN
+	 *    - SCU_TASK_TYPE_SATA_RAW_FRAME
+	 *    - SCU_TASK_TYPE_DMA_OUT
+	 *    - SCU_TASK_TYPE_FPDMAQ_WRITE
+	 *    - SCU_TASK_TYPE_PACKET_DMA_OUT
+	 */
+	u32 task_type:4;
+
+	/* OFFSET 0x08 */
+	/**
+	 * This field is reserved and must be set to 0x00
+	 */
+	u32 link_layer_control:8; /* presently all reserved */
+
+	/**
+	 * This field is set to true when TLR is to be enabled
+	 */
+	u32 ssp_tlr_enable:1;
+
+	/**
+	 * This field specifies whether the SCU DMAs a response frame to host
+	 * memory for good response frames when operating in target mode.
+	 */
+	u32 dma_ssp_target_good_response:1;
+
+	/**
+	 * This field indicates if the SCU should DMA the response frame to
+	 * host memory.
+	 */
+	u32 do_not_dma_ssp_good_response:1;
+
+	/**
+	 * This field is set to true when strict ordering is to be enabled
+	 */
+	u32 strict_ordering:1;
+
+	/**
+	 * This field indicates the type of endianness to be utilized for the
+	 * frame.  Command, task, and response frames use control_frame set
+	 * to 1.
+	 */
+	u32 control_frame:1;
+
+	/**
+	 * This field is reserved and the driver should set it to 0x00
+	 */
+	u32 tl_control_reserved:3;
+
+	/**
+	 * This field is set to true when the SCU hardware task timeout control is to
+	 * be enabled
+	 */
+	u32 timeout_enable:1;
+
+	/**
+	 * This field is reserved and the driver should set it to 0x00
+	 */
+	u32 pts_control_reserved:7;
+
+	/**
+	 * This field should be set to true when block guard is to be enabled
+	 */
+	u32 block_guard_enable:1;
+
+	/**
+	 * This field is reserved and the driver should set it to 0x00
+	 */
+	u32 sdma_control_reserved:7;
+
+	/* OFFSET 0x0C */
+	/**
+	 * This field is the address modifier for this I/O request; it should be
+	 * programmed with the virtual function that is making the request.
+	 */
+	u32 address_modifier:16;
+
+	/**
+	 * @todo Do we support mirrored SMP response frames?
+	 */
+	u32 mirrored_protocol_engine:3;  /* mirrored protocol Engine Index */
+
+	/**
+	 * If this is a mirrored request the logical port index for the mirrored RNi
+	 * must be programmed.
+	 */
+	u32 mirrored_logical_port:4;  /* mirrored local port index */
+
+	/**
+	 * This field is reserved and the driver must set it to 0x00
+	 */
+	u32 reserved_0C_0:8;
+
+	/**
+	 * This field must be set to true if the mirrored request processing is to be
+	 * enabled.
+	 */
+	u32 mirror_request_enable:1;  /* Mirrored request Enable */
+
+	/* OFFSET 0x10 */
+	/**
+	 * This field is the command iu length in dwords
+	 */
+	u32 ssp_command_iu_length:8;
+
+	/**
+	 * This is the target TLR enable bit; it must be set to 0 when creating
+	 * the task context.
+	 */
+	u32 xfer_ready_tlr_enable:1;
+
+	/**
+	 * This field is reserved and the driver must set it to 0x00
+	 */
+	u32 reserved_10_0:7;
+
+	/**
+	 * This is the maximum burst size that the SCU hardware will send in one
+	 * connection.  Its value is (N x 512) and N must be a multiple of 2.
+	 * If the value is 0x00 then the maximum burst size is disabled.
+	 */
+	u32 ssp_max_burst_size:16;
+
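+	/*
+	 * Worked example (illustrative only): programming ssp_max_burst_size
+	 * to 16 limits a single burst to 16 x 512 = 8192 bytes.
+	 */
+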
+	/* OFFSET 0x14 */
+	/**
+	 * This field is set to the number of bytes to be transferred in the request.
+	 */
+	u32 transfer_length_bytes:24; /* In terms of bytes */
+
+	/**
+	 * This field is reserved and the driver should set it to 0x00
+	 */
+	u32 reserved_14_0:8;
+
+	/* OFFSET 0x18-0x2C */
+	/**
+	 * This union provides the protocol-specific part of the SCU task context.
+	 */
+	union protocol_context type;
+
+	/* OFFSET 0x30-0x34 */
+	/**
+	 * This field is the upper 32 bits of the 64 bit physical address of the
+	 * command iu buffer
+	 */
+	u32 command_iu_upper;
+
+	/**
+	 * This field is the lower 32 bits of the 64 bit physical address of the
+	 * command iu buffer
+	 */
+	u32 command_iu_lower;
+
+	/* OFFSET 0x38-0x3C */
+	/**
+	 * This field is the upper 32 bits of the 64 bit physical address of the
+	 * response iu buffer
+	 */
+	u32 response_iu_upper;
+
+	/**
+	 * This field is the lower 32 bits of the 64 bit physical address of the
+	 * response iu buffer
+	 */
+	u32 response_iu_lower;
+
+	/* OFFSET 0x40 */
+	/**
+	 * This field is set to the task phase of the SCU hardware. The driver must
+	 * set this to 0x01
+	 */
+	u32 task_phase:8;
+
+	/**
+	 * This field is set to the transport layer task status.  The driver must set
+	 * this to 0x00
+	 */
+	u32 task_status:8;
+
+	/**
+	 * This field is used during initiator write TLR
+	 */
+	u32 previous_extended_tag:4;
+
+	/**
+	 * This field is set the maximum number of retries for a STP non-data FIS
+	 */
+	u32 stp_retry_count:2;
+
+	/**
+	 * This field is reserved and the driver must set it to 0x00
+	 */
+	u32 reserved_40_1:2;
+
+	/**
+	 * This field is used by the SCU TL to determine when to take a snapshot when
+	 * transmitting read data frames.
+	 *    - 0x00 The entire IO
+	 *    - 0x01 32k
+	 *    - 0x02 64k
+	 *    - 0x04 128k
+	 *    - 0x08 256k
+	 */
+	u32 ssp_tlr_threshold:4;
+
+	/**
+	 * This field is reserved and the driver must set it to 0x00
+	 */
+	u32 reserved_40_2:4;
+
+	/* OFFSET 0x44 */
+	u32 write_data_length; /* read only set to 0 */
+
+	/* OFFSET 0x48-0x58 */
+	struct transport_snapshot snapshot; /* read only set to 0 */
+
+	/* OFFSET 0x5C */
+	u32 blk_prot_en:1;
+	u32 blk_sz:2;
+	u32 blk_prot_func:2;
+	u32 reserved_5C_0:9;
+	u32 active_sgl_element:2;  /* read only set to 0 */
+	u32 sgl_exhausted:1;  /* read only set to 0 */
+	u32 payload_data_transfer_error:4;  /* read only set to 0 */
+	u32 frame_buffer_offset:11; /* read only set to 0 */
+
+	/* OFFSET 0x60-0x7C */
+	/**
+	 * This field is the first SGL element pair found in the TC data structure.
+	 */
+	struct scu_sgl_element_pair sgl_pair_ab;
+	/* OFFSET 0x80-0x9C */
+	/**
+	 * This field is the second SGL element pair found in the TC data structure.
+	 */
+	struct scu_sgl_element_pair sgl_pair_cd;
+
+	/* OFFSET 0xA0-BC */
+	struct scu_sgl_element_pair sgl_snapshot_ac;
+
+	/* OFFSET 0xC0 */
+	u32 active_sgl_element_pair; /* read only set to 0 */
+
+	/* OFFSET 0xC4-0xCC */
+	u32 reserved_C4_CC[3];
+
+	/* OFFSET 0xD0 */
+	u32 interm_crc_val:16;
+	u32 init_crc_seed:16;
+
+	/* OFFSET 0xD4 */
+	u32 app_tag_verify:16;
+	u32 app_tag_gen:16;
+
+	/* OFFSET 0xD8 */
+	u32 ref_tag_seed_verify;
+
+	/* OFFSET 0xDC */
+	u32 UD_bytes_immed_val:13;
+	u32 reserved_DC_0:3;
+	u32 DIF_bytes_immed_val:4;
+	u32 reserved_DC_1:12;
+
+	/* OFFSET 0xE0 */
+	u32 bgc_blk_sz:13;
+	u32 reserved_E0_0:3;
+	u32 app_tag_gen_mask:16;
+
+	/* OFFSET 0xE4 */
+	union {
+		u16 bgctl;
+		struct {
+			u16 crc_verify:1;
+			u16 app_tag_chk:1;
+			u16 ref_tag_chk:1;
+			u16 op:2;
+			u16 legacy:1;
+			u16 invert_crc_seed:1;
+			u16 ref_tag_gen:1;
+			u16 fixed_ref_tag:1;
+			u16 invert_crc:1;
+			u16 app_ref_f_detect:1;
+			u16 uninit_dif_check_err:1;
+			u16 uninit_dif_bypass:1;
+			u16 app_f_detect:1;
+			u16 reserved_0:2;
+		} bgctl_f;
+	};
+
+	u16 app_tag_verify_mask;
+
+	/* OFFSET 0xE8 */
+	u32 blk_guard_err:8;
+	u32 reserved_E8_0:24;
+
+	/* OFFSET 0xEC */
+	u32 ref_tag_seed_gen;
+
+	/* OFFSET 0xF0 */
+	u32 intermediate_crc_valid_snapshot:16;
+	u32 reserved_F0_0:16;
+
+	/* OFFSET 0xF4 */
+	u32 reference_tag_seed_for_verify_function_snapshot;
+
+	/* OFFSET 0xF8 */
+	u32 snapshot_of_reserved_dword_DC_of_tc;
+
+	/* OFFSET 0xFC */
+	u32 reference_tag_seed_for_generate_function_snapshot;
+
+} __packed;
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 0000000..6dcaed0
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,805 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+ * isci_task_refuse() - complete the request to the upper layer driver in
+ *     the case where an I/O needs to be completed back in the submit path.
+ * @ihost: host on which the request was queued
+ * @task: request to complete
+ * @response: response code for the completed task.
+ * @status: status code for the completed task.
+ *
+ */
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+			     enum service_response response,
+			     enum exec_status status)
+
+{
+	unsigned long flags;
+
+	/* Normal notification (task_done) */
+	dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+		__func__, task, response, status);
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+
+	task->task_status.resp = response;
+	task->task_status.stat = status;
+
+	/* Normal notification (task_done) */
+	task->task_state_flags |= SAS_TASK_STATE_DONE;
+	task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+				    SAS_TASK_STATE_PENDING);
+	task->lldd_task = NULL;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	task->task_done(task);
+}
+
+#define for_each_sas_task(num, task) \
+	for (; num > 0; num--,\
+	     task = list_entry(task->list.next, struct sas_task, list))
+
+
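+/*
+ * A device can accept new I/O when it is flagged ready, or, while it sits
+ * in the NCQ-error state, when the task being submitted is itself part of
+ * the NCQ error recovery (e.g. the read-log-ext that drains the error).
+ */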
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+				       struct sas_task *task)
+{
+	return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+		      (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+		       isci_task_is_ncq_recovery(task))
+		    : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to send a task down to
+ *    hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
+{
+	struct isci_host *ihost = dev_to_ihost(task->dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	enum sci_status status = SCI_FAILURE;
+	bool io_ready;
+	u16 tag;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(task->dev);
+	io_ready = isci_device_io_ready(idev, task);
+	tag = isci_alloc_tag(ihost);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	dev_dbg(&ihost->pdev->dev,
+		"task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
+		task, task->dev, idev, idev ? idev->flags : 0,
+		task->uldd_task);
+
+	if (!idev) {
+		isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+				 SAS_DEVICE_UNKNOWN);
+	} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+		/* Indicate QUEUE_FULL so that the scsi midlayer
+		 * retries.
+		 */
+		isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+				 SAS_QUEUE_FULL);
+	} else {
+		/* There is a device and it's ready for I/O. */
+		spin_lock_irqsave(&task->task_state_lock, flags);
+
+		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+			/* The I/O was aborted. */
+			spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+			isci_task_refuse(ihost, task,
+					 SAS_TASK_UNDELIVERED,
+					 SAM_STAT_TASK_ABORTED);
+		} else {
+			task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+			spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+			/* build and send the request. */
+			status = isci_request_execute(ihost, idev, task, tag);
+
+			if (status != SCI_SUCCESS) {
+				spin_lock_irqsave(&task->task_state_lock, flags);
+				/* Did not really start this command. */
+				task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+				spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+				if (test_bit(IDEV_GONE, &idev->flags)) {
+					/* Indicate that the device
+					 * is gone.
+					 */
+					isci_task_refuse(ihost, task,
+						SAS_TASK_UNDELIVERED,
+						SAS_DEVICE_UNKNOWN);
+				} else {
+					/* Indicate QUEUE_FULL so that
+					 * the scsi midlayer retries.
+					 * If the request failed for
+					 * remote device reasons, it
+					 * gets returned as
+					 * SAS_TASK_UNDELIVERED next
+					 * time through.
+					 */
+					isci_task_refuse(ihost, task,
+						SAS_TASK_COMPLETE,
+						SAS_QUEUE_FULL);
+				}
+			}
+		}
+	}
+
+	if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+		spin_lock_irqsave(&ihost->scic_lock, flags);
+		/* command never hit the device, so just free
+		 * the tci and skip the sequence increment
+		 */
+		isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+	}
+
+	isci_put_device(idev);
+	return 0;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+						    struct isci_remote_device *idev,
+						    u16 tag, struct isci_tmf *isci_tmf)
+{
+	enum sci_status status = SCI_FAILURE;
+	struct isci_request *ireq = NULL;
+	struct domain_device *dev;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+	dev = idev->domain_dev;
+
+	/* do common allocation and init of request object. */
+	ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+	if (!ireq)
+		return NULL;
+
+	/* let the core do its construction. */
+	status = sci_task_request_construct(ihost, idev, tag,
+					     ireq);
+
+	if (status != SCI_SUCCESS) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: sci_task_request_construct failed - "
+			 "status = 0x%x\n",
+			 __func__,
+			 status);
+		return NULL;
+	}
+
+	/* XXX convert to get this from task->tproto like other drivers */
+	if (dev->dev_type == SAS_END_DEVICE) {
+		isci_tmf->proto = SAS_PROTOCOL_SSP;
+		status = sci_task_request_construct_ssp(ireq);
+		if (status != SCI_SUCCESS)
+			return NULL;
+	}
+
+	return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+				 struct isci_remote_device *idev,
+				 struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	enum sci_task_status status = SCI_TASK_FAILURE;
+	struct isci_request *ireq;
+	int ret = TMF_RESP_FUNC_FAILED;
+	unsigned long flags;
+	unsigned long timeleft;
+	u16 tag;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	tag = isci_alloc_tag(ihost);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+		return ret;
+
+	/* sanity check, return TMF_RESP_FUNC_FAILED
+	 * if the device is not there and ready.
+	 */
+	if (!idev ||
+	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
+	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+		dev_dbg(&ihost->pdev->dev,
+			"%s: idev = %p not ready (%#lx)\n",
+			__func__,
+			idev, idev ? idev->flags : 0);
+		goto err_tci;
+	} else
+		dev_dbg(&ihost->pdev->dev,
+			"%s: idev = %p\n",
+			__func__, idev);
+
+	/* Assign the pointer to the TMF's completion kernel wait structure. */
+	tmf->complete = &completion;
+	tmf->status = SCI_FAILURE_TIMEOUT;
+
+	ireq = isci_task_request_build(ihost, idev, tag, tmf);
+	if (!ireq)
+		goto err_tci;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	/* start the TMF io. */
+	status = sci_controller_start_task(ihost, idev, ireq);
+
+	if (status != SCI_TASK_SUCCESS) {
+		dev_dbg(&ihost->pdev->dev,
+			 "%s: start_io failed - status = 0x%x, request = %p\n",
+			 __func__,
+			 status,
+			 ireq);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		goto err_tci;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	/* The RNC must be unsuspended before the TMF can get a response. */
+	isci_remote_device_resume_from_abort(ihost, idev);
+
+	/* Wait for the TMF to complete, or a timeout. */
+	timeleft = wait_for_completion_timeout(&completion,
+					       msecs_to_jiffies(timeout_ms));
+
+	if (timeleft == 0) {
+		/* The TMF did not complete - this could be because
+		 * of an unplug.  Terminate the TMF request now.
+		 */
+		isci_remote_device_suspend_terminate(ihost, idev, ireq);
+	}
+
+	isci_print_tmf(ihost, tmf);
+
+	if (tmf->status == SCI_SUCCESS)
+		ret =  TMF_RESP_FUNC_COMPLETE;
+	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+		dev_dbg(&ihost->pdev->dev,
+			"%s: tmf.status == "
+			"SCI_FAILURE_IO_RESPONSE_VALID\n",
+			__func__);
+		ret =  TMF_RESP_FUNC_COMPLETE;
+	}
+	/* Else - leave the default "failed" status alone. */
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: completed request = %p\n",
+		__func__,
+		ireq);
+
+	return ret;
+
+ err_tci:
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+				enum isci_tmf_function_codes code)
+{
+	memset(tmf, 0, sizeof(*tmf));
+	tmf->tmf_code = code;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+					   enum isci_tmf_function_codes code,
+					   struct isci_request *old_request)
+{
+	isci_task_build_tmf(tmf, code);
+	tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
+ *    Domain Template functions to send a LUN reset to the target.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+	struct isci_host *isci_host,
+	struct isci_remote_device *isci_device,
+	u8 *lun)
+{
+	struct isci_tmf tmf;
+	int ret = TMF_RESP_FUNC_FAILED;
+
+	dev_dbg(&isci_host->pdev->dev,
+		"%s: isci_host = %p, isci_device = %p\n",
+		__func__, isci_host, isci_device);
+	/* Send the LUN reset to the target.  By the time the call returns,
+	 * the TMF has fully executed in the target (in which case the return
+	 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or was
+	 * otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+	 */
+	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
+
+	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+	if (ret == TMF_RESP_FUNC_COMPLETE)
+		dev_dbg(&isci_host->pdev->dev,
+			"%s: %p: TMF_LU_RESET passed\n",
+			__func__, isci_device);
+	else
+		dev_dbg(&isci_host->pdev->dev,
+			"%s: %p: TMF_LU_RESET failed (%x)\n",
+			__func__, isci_device, ret);
+
+	return ret;
+}
+
+int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
+{
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	int ret = TMF_RESP_FUNC_COMPLETE;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_get_device(dev->lldd_dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+		__func__, dev, ihost, idev);
+
+	if (!idev) {
+		/* If the device is gone, escalate to I_T_Nexus_Reset. */
+		dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
+
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+
+	/* Suspend the RNC, kill all TCs */
+	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+	    != SCI_SUCCESS) {
+		/* The suspend/terminate only fails if isci_get_device fails */
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+	/* All pending I/Os have been terminated and cleaned up. */
+	if (!test_bit(IDEV_GONE, &idev->flags)) {
+		if (dev_is_sata(dev))
+			sas_ata_schedule_reset(dev);
+		else
+			/* Send the task management part of the reset. */
+			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+	}
+ out:
+	isci_put_device(idev);
+	return ret;
+}
+
+
+/*	 int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context. */
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+	struct isci_host *ihost = dev_to_ihost(task->dev);
+	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+	struct isci_request       *old_request = NULL;
+	struct isci_remote_device *idev = NULL;
+	struct isci_tmf           tmf;
+	int                       ret = TMF_RESP_FUNC_FAILED;
+	unsigned long             flags;
+	int                       target_done_already = 0;
+
+	/* Get the isci_request reference from the task.  Note that
+	 * this check does not depend on the pending request list
+	 * in the device, because tasks driving resets may land here
+	 * after completion in the core.
+	 */
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	spin_lock(&task->task_state_lock);
+
+	old_request = task->lldd_task;
+
+	/* If task is already done, the request isn't valid */
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+	    old_request) {
+		idev = isci_get_device(task->dev->lldd_dev);
+		target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+					       &old_request->flags);
+	}
+	spin_unlock(&task->task_state_lock);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	dev_warn(&ihost->pdev->dev,
+		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+		 __func__, idev,
+		 (dev_is_sata(task->dev) ? "STP/SATA"
+					 : ((dev_is_expander(task->dev))
+						? "SMP"
+						: "SSP")),
+		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+			   ? " IDEV_GONE"
+			   : "")
+			 : " <NULL>"),
+		 task, old_request);
+
+	/* Device reset conditions signalled in task_state_flags are the
+	 * responsibility of libsas to observe at the start of the error
+	 * handler thread.
+	 */
+	if (!idev || !old_request) {
+		/* The request has already completed and there
+		 * is nothing to do here other than to set the task
+		 * done bit, and indicate that the task abort function
+		 * was successful.
+		 */
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+		ret = TMF_RESP_FUNC_COMPLETE;
+
+		dev_warn(&ihost->pdev->dev,
+			 "%s: abort task not needed for %p\n",
+			 __func__, task);
+		goto out;
+	}
+	/* Suspend the RNC, kill the TC */
+	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+	    != SCI_SUCCESS) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: isci_remote_device_reset_terminate(dev=%p, "
+				 "req=%p, task=%p) failed\n",
+			 __func__, idev, old_request, task);
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (task->task_proto == SAS_PROTOCOL_SMP ||
+	    sas_protocol_ata(task->task_proto) ||
+	    target_done_already ||
+	    test_bit(IDEV_GONE, &idev->flags)) {
+
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+		/* No task to send, so explicitly resume the device here */
+		isci_remote_device_resume_from_abort(ihost, idev);
+
+		dev_warn(&ihost->pdev->dev,
+			 "%s: %s request"
+				 " or complete_in_target (%d), "
+				 "or IDEV_GONE (%d), thus no TMF\n",
+			 __func__,
+			 ((task->task_proto == SAS_PROTOCOL_SMP)
+			  ? "SMP"
+			  : (sas_protocol_ata(task->task_proto)
+				? "SATA/STP"
+				: "<other>")
+			  ),
+			 test_bit(IREQ_COMPLETE_IN_TARGET,
+				  &old_request->flags),
+			 test_bit(IDEV_GONE, &idev->flags));
+
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+		ret = TMF_RESP_FUNC_COMPLETE;
+	} else {
+		/* Fill in the tmf structure */
+		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+					       old_request);
+
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+		/* Send the task management request. */
+		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
+		ret = isci_task_execute_tmf(ihost, idev, &tmf,
+					    ISCI_ABORT_TASK_TIMEOUT_MS);
+	}
+out:
+	dev_warn(&ihost->pdev->dev,
+		 "%s: Done; dev = %p, task = %p , old_request == %p\n",
+		 __func__, idev, task, old_request);
+	isci_put_device(idev);
+	return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by
+ *    libsas, to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+	struct domain_device *d_device,
+	u8 *lun)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+	struct domain_device *d_device,
+	u8 *lun)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+	struct domain_device *d_device,
+	u8 *lun)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ *    correctly escalate the failed abort to a LUN or target reset (this is
+ *    because the libsas function sas_scsi_find_task does not correctly
+ *    interpret all return codes from the abort task call).  When
+ *    TMF_RESP_FUNC_SUCC is returned, libsas turns this into a LUN reset;
+ *    when FUNC_FAILED is returned, libsas will turn this into a target reset.
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+	struct sas_task *task)
+{
+	/* See if there is a pending device reset for this device. */
+	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+		return TMF_RESP_FUNC_FAILED;
+	else
+		return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ *    a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ *    sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+			   struct isci_request *ireq,
+			   enum sci_task_status completion_status)
+{
+	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+	struct completion *tmf_complete = NULL;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: request = %p, status=%d\n",
+		__func__, ireq, completion_status);
+
+	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+	if (tmf) {
+		tmf->status = completion_status;
+
+		if (tmf->proto == SAS_PROTOCOL_SSP) {
+			memcpy(&tmf->resp.resp_iu,
+			       &ireq->ssp.rsp,
+			       SSP_RESP_IU_MAX_SIZE);
+		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
+			memcpy(&tmf->resp.d2h_fis,
+			       &ireq->stp.rsp,
+			       sizeof(struct dev_to_host_fis));
+		}
+		/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+		tmf_complete = tmf->complete;
+	}
+	sci_controller_complete_io(ihost, ireq->target_device, ireq);
+	/* set the 'terminated' flag to make sure it cannot be terminated
+	 *  or completed again.
+	 */
+	set_bit(IREQ_TERMINATED, &ireq->flags);
+
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
+
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
+
+	/* The task management part completes last. */
+	if (tmf_complete)
+		complete(tmf_complete);
+}
+
+static int isci_reset_device(struct isci_host *ihost,
+			     struct domain_device *dev,
+			     struct isci_remote_device *idev)
+{
+	int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
+	struct sas_phy *phy = sas_get_local_phy(dev);
+	struct isci_port *iport = dev->port->lldd_port;
+
+	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+	/* Suspend the RNC, terminate all outstanding TCs. */
+	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+	    != SCI_SUCCESS) {
+		rc = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+	/* Note that since the termination for outstanding requests succeeded,
+	 * this function will return success.  This is because the resets will
+	 * only fail if the device has been removed (i.e. hotplug), and the
+	 * primary duty of this function is to cleanup tasks, so that is the
+	 * relevant status.
+	 */
+	if (!test_bit(IDEV_GONE, &idev->flags)) {
+		if (scsi_is_sas_phy_local(phy)) {
+			struct isci_phy *iphy = &ihost->phys[phy->number];
+
+			reset_stat = isci_port_perform_hard_reset(ihost, iport,
+								  iphy);
+		} else
+			reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
+	}
+	/* Explicitly resume the RNC here, since there was no task sent. */
+	isci_remote_device_resume_from_abort(ihost, idev);
+
+	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
+		__func__, idev, reset_stat);
+ out:
+	sas_put_local_phy(phy);
+	return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_get_device(dev->lldd_dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (!idev) {
+		/* XXX: need to cleanup any ireqs targeting this
+		 * domain_device
+		 */
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = isci_reset_device(ihost, dev, idev);
+ out:
+	isci_put_device(idev);
+	return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 0000000..8f4531f
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,189 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+#define ISCI_TERMINATION_TIMEOUT_MSEC 500
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the task management
+ *    functions that can be requested.
+ */
+enum isci_tmf_function_codes {
+
+	isci_tmf_func_none      = 0,
+	isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+	isci_tmf_ssp_lun_reset  = TMF_LU_RESET,
+};
+
+/**
+ * struct isci_tmf - This struct represents the task management object, which
+ *    acts as an interface to libsas for processing task management requests.
+ */
+struct isci_tmf {
+
+	struct completion *complete;
+	enum sas_protocol proto;
+	union {
+		struct ssp_response_iu resp_iu;
+		struct dev_to_host_fis d2h_fis;
+		u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+	} resp;
+	unsigned char lun[8];
+	u16 io_tag;
+	enum isci_tmf_function_codes tmf_code;
+	int status;
+};
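+
+/*
+ * Usage sketch (illustrative; the setup shown is hypothetical): a TMF is
+ * prepared by choosing a function code and LUN, then executed while the
+ * submitter waits on the 'complete' completion; 'status' and 'resp' carry
+ * the outcome back:
+ *
+ *	struct isci_tmf tmf = { .tmf_code = isci_tmf_ssp_lun_reset, };
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *
+ *	int_to_scsilun(0, (struct scsi_lun *)tmf.lun);
+ *	tmf.complete = &done;
+ *	... submit the TMF request, then wait_for_completion(&done) ...
+ */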
+
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
+{
+	if (tmf->proto == SAS_PROTOCOL_SATA)
+		dev_dbg(&ihost->pdev->dev,
+			"%s: status = %x\n"
+			"tmf->resp.d2h_fis.status = %x\n"
+			"tmf->resp.d2h_fis.error = %x\n",
+			__func__,
+			tmf->status,
+			tmf->resp.d2h_fis.status,
+			tmf->resp.d2h_fis.error);
+	else
+		dev_dbg(&ihost->pdev->dev,
+			"%s: status = %x\n"
+			"tmf->resp.resp_iu.datapres = %x\n"
+			"tmf->resp.resp_iu.status = %x\n"
+			"tmf->resp.resp_iu.response_data_len = %x\n"
+			"tmf->resp.resp_iu.resp_data[0] = %x\n"
+			"tmf->resp.resp_iu.resp_data[1] = %x\n"
+			"tmf->resp.resp_iu.resp_data[2] = %x\n"
+			"tmf->resp.resp_iu.resp_data[3] = %x\n",
+			__func__,
+			tmf->status,
+			tmf->resp.resp_iu.datapres,
+			tmf->resp.resp_iu.status,
+			be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+			tmf->resp.resp_iu.resp_data[0],
+			tmf->resp.resp_iu.resp_data[1],
+			tmf->resp.resp_iu.resp_data[2],
+			tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+	struct sas_task *task,
+	gfp_t gfp_flags);
+
+int isci_task_abort_task(
+	struct sas_task *task);
+
+int isci_task_abort_task_set(
+	struct domain_device *d_device,
+	u8 *lun);
+
+int isci_task_clear_aca(
+	struct domain_device *d_device,
+	u8 *lun);
+
+int isci_task_clear_task_set(
+	struct domain_device *d_device,
+	u8 *lun);
+
+int isci_task_query_task(
+	struct sas_task *task);
+
+int isci_task_lu_reset(
+	struct domain_device *d_device,
+	u8 *lun);
+
+int isci_task_clear_nexus_port(
+	struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+	struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+	struct domain_device *d_device);
+
+void isci_task_request_complete(
+	struct isci_host *isci_host,
+	struct isci_request *request,
+	enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+	struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+	struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+	struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+	struct isci_request *request);
+
+int isci_queuecommand(
+	struct scsi_cmnd *scsi_cmd,
+	void (*donefunc)(struct scsi_cmnd *));
+
+#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 0000000..04a6d0d
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,211 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+	struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+	struct sci_unsolicited_frame *uf;
+	dma_addr_t dma = ihost->ufi_dma;
+	void *virt = ihost->ufi_buf;
+	int i;
+
+	/*
+	 * The Unsolicited Frame buffers are set at the start of the UF
+	 * memory descriptor entry. The headers and address table will be
+	 * placed after the buffers.
+	 */
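+
+	/*
+	 * Resulting layout, as programmed below (offsets relative to
+	 * 'dma'/'virt'):
+	 *
+	 *	[0, SCI_UFI_BUF_SIZE)			frame buffers
+	 *	[SCI_UFI_BUF_SIZE,
+	 *	 SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE)	frame headers
+	 *	[SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE,
+	 *	 SCI_UFI_TOTAL_SIZE)			64-bit address table
+	 */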
+
+	/*
+	 * Program the location of the UF header table into the SCU.
+	 * Notes:
+	 * - The address must align on a 64-byte boundary.  This is already
+	 *   guaranteed here: the headers follow the unsolicited frame
+	 *   buffers, which are 1KB-aligned and a multiple of 1KB in size.
+	 * - Program unused header entries to overlap with the last
+	 *   unsolicited frame.  The silicon will never DMA to these unused
+	 *   headers, since we program the UF address table pointers to
+	 *   NULL.
+	 */
+	uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
+	uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
+
+	/*
+	 * Program the location of the UF address table into the SCU.
+	 * Notes:
+	 * - The address must align on a 64-bit boundary.  This is already
+	 *   guaranteed here: the table follows the header array, which
+	 *   starts on a 64-byte boundary and whose entries are each 64
+	 *   bytes in size.
+	 */
+	uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+	uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+	uf_control->get = 0;
+
+	/*
+	 * UF buffer requirements are:
+	 * - The last entry in the UF queue is not NULL.
+	 * - The number of entries programmed into the queue (NULL or
+	 *   not-NULL) is a power of two.
+	 * - The queue is aligned on a 1KB boundary.
+	 */
+
+	/*
+	 * Program the actual used UF buffers into the UF address table and
+	 * the controller's array of UFs.
+	 */
+	for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+		uf = &uf_control->buffers.array[i];
+
+		uf_control->address_table.array[i] = dma;
+
+		uf->buffer = virt;
+		uf->header = &uf_control->headers.array[i];
+		uf->state  = UNSOLICITED_FRAME_EMPTY;
+
+		/*
+		 * Advance the physical and virtual memory pointers.  Each
+		 * buffer is 1KB in size and 1KB-aligned, so incrementing by
+		 * 1KB preserves the alignment.
+		 */
+		virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+		dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+	}
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+							 u32 frame_index,
+							 void **frame_header)
+{
+	if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+		/* Skip the first word in the frame, since it is a control
+		 * word used by the hardware.
+		 */
+		*frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+							 u32 frame_index,
+							 void **frame_buffer)
+{
+	if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+		*frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+						 u32 frame_index)
+{
+	u32 frame_get;
+	u32 frame_cycle;
+
+	frame_get   = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+	frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
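+
+	/*
+	 * Note on the encoding: aside from the enable bit written back
+	 * below, 'get' packs the queue index into the low bits and a
+	 * wrap/cycle flag into the bit whose value equals
+	 * SCU_MAX_UNSOLICITED_FRAMES (which must therefore be a power of
+	 * two).  For example, if SCU_MAX_UNSOLICITED_FRAMES were 64 (0x40),
+	 * a get value of 0x45 would decompose into frame_get == 0x5 and
+	 * frame_cycle == 0x40.
+	 */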
+
+	/*
+	 * In the event there are NULL entries in the UF table, advance the
+	 * get pointer past them to determine whether this frame should be
+	 * released.  (Test the bound first so we never index past the end
+	 * of the table.)
+	 */
+	while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
+	       lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
+		frame_get++;
+
+	/*
+	 * The table must never have a NULL entry as its last element, so
+	 * running off the end of it here is illegal.
+	 */
+	BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+	if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+		return false;
+
+	uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+	if (frame_get != frame_index) {
+		/*
+		 * Frames remain in use until we advance the get pointer,
+		 * so there is nothing we can do here.
+		 */
+		return false;
+	}
+
+	/*
+	 * The frame index is equal to the current get pointer, so we can
+	 * now free up every consecutive frame entry that has been released.
+	 */
+	while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+		uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+		if (frame_get + 1 == SCU_MAX_UNSOLICITED_FRAMES - 1) {
+			frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+			frame_get = 0;
+		} else {
+			frame_get++;
+		}
+	}
+
+	uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+	return true;
+}
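+
+/*
+ * Caller-side sketch (illustrative): a 'true' return means the get pointer
+ * advanced and the new value should be published to the silicon.
+ * Conceptually (register name shown for illustration; see how
+ * uf_control.get is consumed in host.c):
+ *
+ *	if (sci_unsolicited_frame_control_release_frame(uf_control, frame_index))
+ *		writel(uf_control->get,
+ *		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+ */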
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 0000000..1bc551e
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,282 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header.  The
+ * first DWORD contains UF attributes defined by the silicon architecture;
+ * the data field holds the actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+	/**
+	 * This field indicates if there is an Initiator Index Table entry with
+	 * which this header is associated.
+	 */
+	u32 iit_exists:1;
+
+	/**
+	 * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+	 */
+	u32 protocol_type:3;
+
+	/**
+	 * This field indicates if the frame is an address frame (IAF or OAF)
+	 * or an information unit frame.
+	 */
+	u32 is_address_frame:1;
+
+	/**
+	 * This field simply indicates the connection rate at which the frame
+	 * was received.
+	 */
+	u32 connection_rate:4;
+
+	u32 reserved:23;
+
+	/**
+	 * This field represents the actual header data received on the link.
+	 */
+	u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
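+
+/*
+ * Size note: one attribute DWORD plus SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS
+ * (15) data DWORDs makes each header 16 DWORDs, i.e. 64 bytes; this is what
+ * lets the UF address table inherit 64-byte alignment from the header array
+ * (see sci_unsolicited_frame_control_construct()).
+ */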
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state.  The
+ * controller object cannot update the hardware unsolicited frame get pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+	/**
+	 * This state is when the frame is empty and not in use.  It is
+	 * different from the released state in that the hardware could DMA
+	 * data to this frame buffer.
+	 */
+	UNSOLICITED_FRAME_EMPTY,
+
+	/**
+	 * This state is set when the frame buffer is in use by some
+	 * object in the system.
+	 */
+	UNSOLICITED_FRAME_IN_USE,
+
+	/**
+	 * This state is set when the frame is returned to the free pool
+	 * but one or more frames prior to this one are still in use.
+	 * Once all of the frames before this one are freed, it will go to
+	 * the empty state.
+	 */
+	UNSOLICITED_FRAME_RELEASED,
+
+	UNSOLICITED_FRAME_MAX_STATES
+};
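+
+/*
+ * Lifecycle sketch, as implied by the state descriptions above:
+ *
+ *	EMPTY -> IN_USE      frame written by the hardware and handed to
+ *			     a consumer
+ *	IN_USE -> RELEASED   consumer is done, but earlier frames are
+ *			     still outstanding
+ *	RELEASED -> EMPTY    the get pointer advances past this frame
+ */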
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container
+ * for the current frame state, frame header, and frame buffer.
+ */
+struct sci_unsolicited_frame {
+	/**
+	 * This field contains the current frame state
+	 */
+	enum unsolicited_frame_state state;
+
+	/**
+	 * This field points to the frame header data.
+	 */
+	struct scu_unsolicited_frame_header *header;
+
+	/**
+	 * This field points to the frame buffer data.
+	 */
+	void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+	/**
+	 * This field represents a virtual pointer to the start
+	 * address of the UF header array.  Each entry is a
+	 * hardware-defined 64-byte unsolicited frame header.
+	 */
+	struct scu_unsolicited_frame_header *array;
+
+	/**
+	 * This field specifies the physical address location for the UF
+	 * header array.
+	 */
+	dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+	/**
+	 * This field is the unsolicited frame data; it is used to manage
+	 * the data for the unsolicited frame requests.  It also represents
+	 * the virtual address location that corresponds to the
+	 * physical_address field.
+	 */
+	struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+	/**
+	 * This field specifies the physical address location for the UF
+	 * buffer array.
+	 */
+	dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data.  The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+	/**
+	 * This field represents a virtual pointer that refers to the
+	 * starting address of the UF address table.
+	 * 64-bit pointers are required by the hardware.
+	 */
+	u64 *array;
+
+	/**
+	 * This field specifies the physical address location for the UF
+	 * address table.
+	 */
+	dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+	/**
+	 * This field is the software copy of the unsolicited frame queue
+	 * get pointer.  The controller object writes this value to the
+	 * hardware to tell it that more unsolicited frame slots are free.
+	 */
+	u32 get;
+
+	/**
+	 * This field contains all of the unsolicited frame header
+	 * specific fields.
+	 */
+	struct sci_uf_header_array headers;
+
+	/**
+	 * This field contains all of the unsolicited frame buffer
+	 * specific fields.
+	 */
+	struct sci_uf_buffer_array buffers;
+
+	/**
+	 * This field contains all of the unsolicited frame address table
+	 * specific fields.
+	 */
+	struct sci_uf_address_table_array address_table;
+
+};
+
+#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
+#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
+#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
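+
+/*
+ * Worked example with hypothetical values: if SCU_MAX_UNSOLICITED_FRAMES
+ * were 64 and SCU_UNSOLICITED_FRAME_BUFFER_SIZE were 1KB, this would come
+ * to 64 * 1KB = 64KB of buffers, 64 * 64B = 4KB of headers, and
+ * 64 * 8B = 512B of address table, i.e. SCI_UFI_TOTAL_SIZE == 70144 bytes.
+ */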
+
+struct isci_host;
+
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+	struct sci_unsolicited_frame_control *uf_control,
+	u32 frame_index,
+	void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+	struct sci_unsolicited_frame_control *uf_control,
+	u32 frame_index,
+	void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+	struct sci_unsolicited_frame_control *uf_control,
+	u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */